diff --git a/.coveragerc b/.coveragerc
index 3128ad99e..f12d4dc21 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -18,13 +18,11 @@
 [run]
 branch = True
 omit =
-  google/cloud/__init__.py
-  google/__init__.py
   google/cloud/bigtable_admin/__init__.py
   google/cloud/bigtable_admin/gapic_version.py
 
 [report]
-fail_under = 100
+fail_under = 99
 show_missing = True
 exclude_lines =
   # Re-enable the standard pragma
@@ -33,11 +31,5 @@ exclude_lines =
   def __repr__
   # Ignore abstract methods
   raise NotImplementedError
-  # Ignore setuptools-less fallback
-  except pkg_resources.DistributionNotFound:
 omit =
-  */gapic/*.py
-  */proto/*.py
-  */core/*.py
   */site-packages/*.py
-  google/cloud/__init__.py
diff --git a/.cross_sync/README.md b/.cross_sync/README.md
new file mode 100644
index 000000000..0d8a1cf8c
--- /dev/null
+++ b/.cross_sync/README.md
@@ -0,0 +1,75 @@
+# CrossSync
+
+CrossSync provides a simple way to share logic between async and sync code.
+It is made up of a small library that provides:
+1. a set of shims that provide a shared sync/async API surface
+2. annotations that are used to guide generation of a sync version from an async class
+
+Using CrossSync, the async code is treated as the source of truth, and sync code is generated from it.
+
+## Usage
+
+### CrossSync Shims
+
+Many asyncio components have direct, 1:1 threaded counterparts for use in non-asyncio code. CrossSync
+provides a compatibility layer that works with both.
+
+| CrossSync | Asyncio Version | Sync Version |
+| --- | --- | --- |
+| CrossSync.Queue | asyncio.Queue | queue.Queue |
+| CrossSync.Condition | asyncio.Condition | threading.Condition |
+| CrossSync.Future | asyncio.Future | concurrent.futures.Future |
+| CrossSync.Task | asyncio.Task | concurrent.futures.Future |
+| CrossSync.Event | asyncio.Event | threading.Event |
+| CrossSync.Semaphore | asyncio.Semaphore | threading.Semaphore |
+| CrossSync.Awaitable | typing.Awaitable | typing.Union (no-op type) |
+| CrossSync.Iterable | typing.AsyncIterable | typing.Iterable |
+| CrossSync.Iterator | typing.AsyncIterator | typing.Iterator |
+| CrossSync.Generator | typing.AsyncGenerator | typing.Generator |
+| CrossSync.Retry | google.api_core.retry.AsyncRetry | google.api_core.retry.Retry |
+| CrossSync.StopIteration | StopAsyncIteration | StopIteration |
+| CrossSync.Mock | unittest.mock.AsyncMock | unittest.mock.Mock |
+
+Custom aliases can be added using `CrossSync.add_mapping(class, name)`.
+
+Additionally, CrossSync provides method implementations that work equivalently in async and sync code:
+- `CrossSync.sleep()`
+- `CrossSync.gather_partials()`
+- `CrossSync.wait()`
+- `CrossSync.condition_wait()`
+- `CrossSync.event_wait()`
+- `CrossSync.create_task()`
+- `CrossSync.retry_target()`
+- `CrossSync.retry_target_stream()`
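+
+A short, hypothetical sketch of code written against the shims (the import path
+assumes the runtime location listed under Architecture below; `drain` is an
+illustrative name, not a library function):
+
+```python
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+
+async def drain(queue: CrossSync.Queue) -> list:
+    # CrossSync.Queue is asyncio.Queue here; in the generated sync version it
+    # resolves to queue.Queue, and the await below is stripped by rm_aio
+    items = []
+    while not queue.empty():
+        items.append(CrossSync.rm_aio(await queue.get()))
+    return items
+```
+
+### Annotations
+
+CrossSync provides a set of annotations to mark up async classes, to guide the generation of sync code.
+
+- `@CrossSync.convert_sync`
+  - marks classes for conversion. Unmarked classes will be copied as-is
+  - if add_mapping is included, the async and sync classes can be accessed using a shared CrossSync.X alias
+- `@CrossSync.convert`
+  - marks async functions for conversion. Unmarked methods will be copied as-is
+- `@CrossSync.drop`
+  - marks functions or classes that should not be included in sync output
+- `@CrossSync.pytest`
+  - marks test functions.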
+    Test functions automatically have all async keywords stripped (i.e., rm_aio is unneeded)
+- `CrossSync.add_mapping`
+  - manually registers a new CrossSync.X alias, for custom types
+- `CrossSync.rm_aio`
+  - marks regions of the code that include asyncio keywords that should be stripped during generation
+
+### Code Generation
+
+Generation can be initiated using `nox -s generate_sync`
+from the root of the project. This will find all classes with the `__CROSS_SYNC_OUTPUT__ = "path.to.output"`
+annotation, and generate a sync version of classes marked with `@CrossSync.convert_sync` at the output path.
+
+There is a unit test at `tests/unit/data/test_sync_up_to_date.py` that verifies that the generated code is up to date.
+
+## Architecture
+
+CrossSync is made up of two parts:
+- the runtime shims and annotations live in `/google/cloud/bigtable/data/_cross_sync`
+- the code generation logic lives in `/.cross_sync/` in the repo root
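+
+## Example
+
+A minimal, hypothetical module showing the annotations together (`my_pkg` and
+`CounterAsync` are illustrative names, not part of the library):
+
+```python
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+# Tells the generator where to write the sync version of this file.
+__CROSS_SYNC_OUTPUT__ = "my_pkg._my_module_sync"  # -> my_pkg/_my_module_sync.py
+
+
+@CrossSync.convert_sync
+class CounterAsync:
+    @CrossSync.convert
+    async def count(self, n: int) -> int:
+        """Sum 0..n-1, yielding control between increments."""
+        total = 0
+        for i in range(n):
+            total += i
+            # the await is stripped when the sync version is generated
+            CrossSync.rm_aio(await CrossSync.sleep(0))
+        return total
+
+    @CrossSync.drop
+    async def asyncio_only_helper(self) -> None:
+        """Dropped entirely from the generated sync class."""
+```
+
+Running `nox -s generate_sync` would then write the converted module to
+`my_pkg/_my_module_sync.py`.
diff --git a/.cross_sync/generate.py b/.cross_sync/generate.py
new file mode 100644
index 000000000..5158d0f37
--- /dev/null
+++ b/.cross_sync/generate.py
@@ -0,0 +1,107 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+from typing import Sequence
+import ast
+"""
+Entrypoint for initiating an async -> sync conversion using CrossSync
+
+Finds all python files rooted in a given directory, and uses
+transformers.CrossSyncFileProcessor to handle any files marked with
+__CROSS_SYNC_OUTPUT__
+"""
+
+
+def extract_header_comments(file_path) -> str:
+    """
+    Extract the file header. Header is defined as the top-level
+    comments before any code or imports
+    """
+    header = []
+    with open(file_path, "r") as f:
+        for line in f:
+            if line.startswith("#") or line.strip() == "":
+                header.append(line)
+            else:
+                break
+    header.append("\n# This file is automatically generated by CrossSync. 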
Do not edit manually.\n\n") + return "".join(header) + + +class CrossSyncOutputFile: + + def __init__(self, output_path: str, ast_tree, header: str | None = None): + self.output_path = output_path + self.tree = ast_tree + self.header = header or "" + + def render(self, with_formatter=True, save_to_disk: bool = True) -> str: + """ + Render the file to a string, and optionally save to disk + + Args: + with_formatter: whether to run the output through black before returning + save_to_disk: whether to write the output to the file path + """ + full_str = self.header + ast.unparse(self.tree) + if with_formatter: + import black # type: ignore + import autoflake # type: ignore + + full_str = black.format_str( + autoflake.fix_code(full_str, remove_all_unused_imports=True), + mode=black.FileMode(), + ) + if save_to_disk: + import os + os.makedirs(os.path.dirname(self.output_path), exist_ok=True) + with open(self.output_path, "w") as f: + f.write(full_str) + return full_str + + +def convert_files_in_dir(directory: str) -> set[CrossSyncOutputFile]: + import glob + from transformers import CrossSyncFileProcessor + + # find all python files in the directory + files = glob.glob(directory + "/**/*.py", recursive=True) + # keep track of the output files pointed to by the annotated classes + artifacts: set[CrossSyncOutputFile] = set() + file_transformer = CrossSyncFileProcessor() + # run each file through ast transformation to find all annotated classes + for file_path in files: + ast_tree = ast.parse(open(file_path).read()) + output_path = file_transformer.get_output_path(ast_tree) + if output_path is not None: + # contains __CROSS_SYNC_OUTPUT__ annotation + converted_tree = file_transformer.visit(ast_tree) + header = extract_header_comments(file_path) + artifacts.add(CrossSyncOutputFile(output_path, converted_tree, header)) + # return set of output artifacts + return artifacts + + +def save_artifacts(artifacts: Sequence[CrossSyncOutputFile]): + for a in artifacts: + a.render(save_to_disk=True) + + +if __name__ == "__main__": + import sys + + search_root = sys.argv[1] + outputs = convert_files_in_dir(search_root) + print(f"Generated {len(outputs)} artifacts: {[a.output_path for a in outputs]}") + save_artifacts(outputs) diff --git a/.cross_sync/transformers.py b/.cross_sync/transformers.py new file mode 100644 index 000000000..9adadd0aa --- /dev/null +++ b/.cross_sync/transformers.py @@ -0,0 +1,338 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Provides a set of ast.NodeTransformer subclasses that are composed to generate +async code into sync code. 
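+For example, `async def f(): return await g()` is converted into
+`def f(): return g()`.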
+
+At a high level:
+- The main entrypoint is CrossSyncFileProcessor, which is used to find files in
+  the codebase that include __CROSS_SYNC_OUTPUT__, and transform them
+  according to the `CrossSync` annotations they contain
+- SymbolReplacer is used to swap out CrossSync.X with CrossSync._Sync_Impl.X
+- RmAioFunctions is used to strip out asyncio keywords marked with CrossSync.rm_aio
+  (deferring to AsyncToSync to handle the actual transformation)
+- StripAsyncConditionalBranches finds `if CrossSync.is_async:` conditionals, and strips out
+  the unneeded branch for the sync output
+"""
+from __future__ import annotations
+
+import ast
+
+import sys
+# add cross_sync to path
+sys.path.append("google/cloud/bigtable/data/_cross_sync")
+from _decorators import AstDecorator
+
+
+class SymbolReplacer(ast.NodeTransformer):
+    """
+    Replaces all instances of a symbol in an AST with a replacement
+
+    Works for function signatures, method calls, docstrings, and type annotations
+    """
+    def __init__(self, replacements: dict[str, str]):
+        self.replacements = replacements
+
+    def visit_Name(self, node):
+        if node.id in self.replacements:
+            node.id = self.replacements[node.id]
+        return node
+
+    def visit_Attribute(self, node):
+        return ast.copy_location(
+            ast.Attribute(
+                self.visit(node.value),
+                self.replacements.get(node.attr, node.attr),
+                node.ctx,
+            ),
+            node,
+        )
+
+    def visit_AsyncFunctionDef(self, node):
+        """
+        Replace async function docstrings
+        """
+        # use same logic as FunctionDef
+        return self.visit_FunctionDef(node)
+
+    def visit_FunctionDef(self, node):
+        """
+        Replace function docstrings
+        """
+        docstring = ast.get_docstring(node)
+        if docstring and isinstance(node.body[0], ast.Expr) \
+            and isinstance(node.body[0].value, ast.Constant) \
+            and isinstance(node.body[0].value.value, str) \
+        :
+            for key_word, replacement in self.replacements.items():
+                docstring = docstring.replace(key_word, replacement)
+            node.body[0].value.value = docstring
+        return self.generic_visit(node)
+
+    def visit_Constant(self, node):
+        """Replace string type annotations"""
+        try:
+            node.value = self.replacements.get(node.value, node.value)
+        except TypeError:
+            # ignore unhashable types (e.g. 
list) + pass + return node + + +class AsyncToSync(ast.NodeTransformer): + """ + Replaces or strips all async keywords from a given AST + """ + def visit_Await(self, node): + """ + Strips await keyword + """ + return self.visit(node.value) + + def visit_AsyncFor(self, node): + """ + Replaces `async for` with `for` + """ + return ast.copy_location( + ast.For( + self.visit(node.target), + self.visit(node.iter), + [self.visit(stmt) for stmt in node.body], + [self.visit(stmt) for stmt in node.orelse], + ), + node, + ) + + def visit_AsyncWith(self, node): + """ + Replaces `async with` with `with` + """ + return ast.copy_location( + ast.With( + [self.visit(item) for item in node.items], + [self.visit(stmt) for stmt in node.body], + ), + node, + ) + + def visit_AsyncFunctionDef(self, node): + """ + Replaces `async def` with `def` + """ + return ast.copy_location( + ast.FunctionDef( + node.name, + self.visit(node.args), + [self.visit(stmt) for stmt in node.body], + [self.visit(decorator) for decorator in node.decorator_list], + node.returns and self.visit(node.returns), + ), + node, + ) + + def visit_ListComp(self, node): + """ + Replaces `async for` with `for` in list comprehensions + """ + for generator in node.generators: + generator.is_async = False + return self.generic_visit(node) + + +class RmAioFunctions(ast.NodeTransformer): + """ + Visits all calls marked with CrossSync.rm_aio, and removes asyncio keywords + """ + RM_AIO_FN_NAME = "rm_aio" + RM_AIO_CLASS_NAME = "CrossSync" + + def __init__(self): + self.to_sync = AsyncToSync() + + def _is_rm_aio_call(self, node) -> bool: + """ + Check if a node is a CrossSync.rm_aio call + """ + if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute) and isinstance(node.func.value, ast.Name): + if node.func.attr == self.RM_AIO_FN_NAME and node.func.value.id == self.RM_AIO_CLASS_NAME: + return True + return False + + def visit_Call(self, node): + if self._is_rm_aio_call(node): + return self.visit(self.to_sync.visit(node.args[0])) + return self.generic_visit(node) + + def visit_AsyncWith(self, node): + """ + `async with` statements can contain multiple async context managers. 
+
+        If any of them contains a CrossSync.rm_aio statement, convert it into a standard `with` statement
+        """
+        if any(self._is_rm_aio_call(item.context_expr) for item in node.items
+        ):
+            new_node = ast.copy_location(
+                ast.With(
+                    [self.visit(item) for item in node.items],
+                    [self.visit(stmt) for stmt in node.body],
+                ),
+                node,
+            )
+            return self.generic_visit(new_node)
+        return self.generic_visit(node)
+
+    def visit_AsyncFor(self, node):
+        """
+        `async for` statements are not fully wrapped by rm_aio calls
+        (only the iterator is), so they are handled separately
+        """
+        it = node.iter
+        if self._is_rm_aio_call(it):
+            return ast.copy_location(
+                ast.For(
+                    self.visit(node.target),
+                    self.visit(it),
+                    [self.visit(stmt) for stmt in node.body],
+                    [self.visit(stmt) for stmt in node.orelse],
+                ),
+                node,
+            )
+        return self.generic_visit(node)
+
+
+class StripAsyncConditionalBranches(ast.NodeTransformer):
+    """
+    Visits all if statements in an AST, and removes branches marked with CrossSync.is_async
+    """
+
+    def visit_If(self, node):
+        """
+        remove CrossSync.is_async branches from top-level if statements
+        """
+        kept_branch = None
+        # check for CrossSync.is_async
+        if self._is_async_check(node.test):
+            kept_branch = node.orelse
+        # check for not CrossSync.is_async
+        elif isinstance(node.test, ast.UnaryOp) and isinstance(node.test.op, ast.Not) and self._is_async_check(node.test.operand):
+            kept_branch = node.body
+        if kept_branch is not None:
+            # only keep the statements in the kept branch
+            return [self.visit(n) for n in kept_branch]
+        else:
+            # keep the entire if statement
+            return self.generic_visit(node)
+
+    def _is_async_check(self, node) -> bool:
+        """
+        Check for CrossSync.is_async or CrossSync.is_async == True checks
+        """
+        if isinstance(node, ast.Attribute):
+            # for CrossSync.is_async
+            return isinstance(node.value, ast.Name) and node.value.id == "CrossSync" and node.attr == "is_async"
+        elif isinstance(node, ast.Compare):
+            # for CrossSync.is_async == True
+            return self._is_async_check(node.left) and (isinstance(node.ops[0], ast.Eq) or isinstance(node.ops[0], ast.Is)) and len(node.comparators) == 1 and node.comparators[0].value == True
+        return False
+
+
+class CrossSyncFileProcessor(ast.NodeTransformer):
+    """
+    Visits a file, looking for __CROSS_SYNC_OUTPUT__ annotations
+
+    If found, the file is processed with the following steps:
+    - strip out asyncio keywords within CrossSync.rm_aio calls
+    - transform classes and methods annotated with CrossSync decorators
+    - remove branches gated behind CrossSync.is_async conditionals
+    - replace remaining CrossSync references with corresponding CrossSync._Sync_Impl calls
+    - save changes to an output file at the path specified by __CROSS_SYNC_OUTPUT__
+    """
+    FILE_ANNOTATION = "__CROSS_SYNC_OUTPUT__"
+
+    def get_output_path(self, node):
+        for n in node.body:
+            if isinstance(n, ast.Assign):
+                for target in n.targets:
+                    if isinstance(target, ast.Name) and target.id == self.FILE_ANNOTATION:
+                        # return the output path
+                        return n.value.value.replace(".", "/") + ".py"
+
+    def visit_Module(self, node):
+        # look for __CROSS_SYNC_OUTPUT__ Assign statement
+        output_path = self.get_output_path(node)
+        if output_path:
+            # if found, process the file
+            converted = self.generic_visit(node)
+            # strip out CrossSync.rm_aio calls
+            converted = RmAioFunctions().visit(converted)
+            # strip out CrossSync.is_async branches
+            converted = StripAsyncConditionalBranches().visit(converted)
+            # replace CrossSync statements
+            converted = SymbolReplacer({"CrossSync": "CrossSync._Sync_Impl"}).visit(converted)
+            return converted
+        else:
+            # 
not cross_sync file. Return None + return None + + def visit_ClassDef(self, node): + """ + Called for each class in file. If class has a CrossSync decorator, it will be transformed + according to the decorator arguments. Otherwise, class is returned unchanged + """ + orig_decorators = node.decorator_list + for decorator in orig_decorators: + try: + handler = AstDecorator.get_for_node(decorator) + # transformation is handled in sync_ast_transform method of the decorator + node = handler.sync_ast_transform(node, globals()) + except ValueError: + # not cross_sync decorator + continue + return self.generic_visit(node) if node else None + + def visit_Assign(self, node): + """ + strip out __CROSS_SYNC_OUTPUT__ assignments + """ + if isinstance(node.targets[0], ast.Name) and node.targets[0].id == self.FILE_ANNOTATION: + return None + return self.generic_visit(node) + + def visit_FunctionDef(self, node): + """ + Visit any sync methods marked with CrossSync decorators + """ + return self.visit_AsyncFunctionDef(node) + + def visit_AsyncFunctionDef(self, node): + """ + Visit and transform any async methods marked with CrossSync decorators + """ + try: + if hasattr(node, "decorator_list"): + found_list, node.decorator_list = node.decorator_list, [] + for decorator in found_list: + try: + handler = AstDecorator.get_for_node(decorator) + node = handler.sync_ast_transform(node, globals()) + if node is None: + return None + # recurse to any nested functions + node = self.generic_visit(node) + except ValueError: + # keep unknown decorators + node.decorator_list.append(decorator) + continue + return self.generic_visit(node) + except ValueError as e: + raise ValueError(f"node {node.name} failed") from e diff --git a/.flake8 b/.flake8 index 87f6e408c..32986c792 100644 --- a/.flake8 +++ b/.flake8 @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 2f1fee904..4012444e4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -5,8 +5,8 @@ # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax # Note: This file is autogenerated. To make changes to the codeowner team, please update .repo-metadata.json. 
-# @googleapis/yoshi-python @googleapis/api-bigtable are the default owners for changes in this repo -* @googleapis/yoshi-python @googleapis/api-bigtable +# @googleapis/yoshi-python @googleapis/api-bigtable @googleapis/api-bigtable-partners are the default owners for changes in this repo +* @googleapis/yoshi-python @googleapis/api-bigtable @googleapis/api-bigtable-partners -# @googleapis/python-samples-reviewers @googleapis/api-bigtable are the default owners for samples changes -/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable +# @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners are the default owners for samples changes +/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners @googleapis/cloud-sdk-python-team diff --git a/.github/auto-approve.yml b/.github/auto-approve.yml deleted file mode 100644 index 311ebbb85..000000000 --- a/.github/auto-approve.yml +++ /dev/null @@ -1,3 +0,0 @@ -# https://github.com/googleapis/repo-automation-bots/tree/main/packages/auto-approve -processes: - - "OwlBotTemplateChanges" diff --git a/.github/auto-label.yaml b/.github/auto-label.yaml index b2016d119..21786a4eb 100644 --- a/.github/auto-label.yaml +++ b/.github/auto-label.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,3 +13,8 @@ # limitations under the License. requestsize: enabled: true + +path: + pullrequest: true + paths: + samples: "samples" diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml new file mode 100644 index 000000000..1e27e789a --- /dev/null +++ b/.github/blunderbuss.yml @@ -0,0 +1,20 @@ +# Blunderbuss config +# +# This file controls who is assigned for pull requests and issues. +# Note: This file is autogenerated. To make changes to the assignee +# team, please update `codeowner_team` in `.repo-metadata.json`. +assign_issues: + - googleapis/api-bigtable + - googleapis/api-bigtable-partners + +assign_issues_by: + - labels: + - "samples" + to: + - googleapis/python-samples-reviewers + - googleapis/api-bigtable + - googleapis/api-bigtable-partners + +assign_prs: + - googleapis/api-bigtable + - googleapis/api-bigtable-partners diff --git a/.github/.OwlBot.lock.yaml b/.github/flakybot.yaml similarity index 70% rename from .github/.OwlBot.lock.yaml rename to .github/flakybot.yaml index a3da1b0d4..2159a1bca 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/flakybot.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,7 +11,5 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-docker: - image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 -# created: 2023-08-02T10:53:29.114535628Z + +issuePriority: p2 \ No newline at end of file diff --git a/.github/release-please.yml b/.github/release-please.yml deleted file mode 100644 index 593e83f9f..000000000 --- a/.github/release-please.yml +++ /dev/null @@ -1,12 +0,0 @@ -releaseType: python -handleGHRelease: true -# NOTE: this section is generated by synthtool.languages.python -# See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py -manifest: true -branches: -- branch: v1 - handleGHRelease: true - releaseType: python -- branch: v0 - handleGHRelease: true - releaseType: python diff --git a/.github/release-trigger.yml b/.github/release-trigger.yml deleted file mode 100644 index d4ca94189..000000000 --- a/.github/release-trigger.yml +++ /dev/null @@ -1 +0,0 @@ -enabled: true diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml deleted file mode 100644 index a0d3362c9..000000000 --- a/.github/sync-repo-settings.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Whether or not rebase-merging is enabled on this repository. -# Defaults to `true` -rebaseMergeAllowed: true - -# Whether or not squash-merging is enabled on this repository. -# Defaults to `true` -squashMergeAllowed: true - -# Whether or not PRs are merged with a merge commit on this repository. -# Defaults to `false` -mergeCommitAllowed: false - -# Rules for main branch protection -branchProtectionRules: -# Identifies the protection rule pattern. Name of the branch to be protected. -# Defaults to `main` -- pattern: main - # Can admins overwrite branch protection. - # Defaults to `true` - isAdminEnforced: true - # Number of approving reviews required to update matching branches. - # Defaults to `1` - requiredApprovingReviewCount: 1 - # Are reviews from code owners required to update matching branches. - # Defaults to `false` - requiresCodeOwnerReviews: true - # Require up to date branches - requiresStrictStatusChecks: false - # List of required status check contexts that must pass for commits to be accepted to matching branches. - requiredStatusCheckContexts: - - 'Kokoro' - - 'Kokoro system-3.8' - - 'cla/google' -# List of explicit permissions to add (additive only) -permissionRules: - # Team slug to add to repository permissions - - team: yoshi-admins - # Access level required, one of push|pull|admin|maintain|triage - permission: admin - # Team slug to add to repository permissions - - team: yoshi-python-admins - # Access level required, one of push|pull|admin|maintain|triage - permission: admin - # Team slug to add to repository permissions - - team: yoshi-python - # Access level required, one of push|pull|admin|maintain|triage - permission: push diff --git a/.github/workflows/conformance.yaml b/.github/workflows/conformance.yaml new file mode 100644 index 000000000..f7396eaa9 --- /dev/null +++ b/.github/workflows/conformance.yaml @@ -0,0 +1,64 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# Github action job to test core java library features on +# downstream client libraries before they are released. +on: + push: + branches: + - main + pull_request: +name: Conformance +jobs: + conformance: + runs-on: ubuntu-latest + strategy: + matrix: + test-version: [ "v0.0.4" ] + py-version: [ 3.13 ] + client-type: [ "async", "sync"] + # None of the clients currently support reverse scans, execute query plan refresh, retry info, or routing cookie + include: + - client-type: "async" + test_args: "-skip \"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie\"" + - client-type: "sync" + test_args: "-skip \"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie|_Generic_MultiStream\"" + fail-fast: false + name: "${{ matrix.client-type }} client / python ${{ matrix.py-version }} / test tag ${{ matrix.test-version }}" + steps: + - uses: actions/checkout@v4 + name: "Checkout python-bigtable" + - uses: actions/checkout@v4 + name: "Checkout conformance tests" + with: + repository: googleapis/cloud-bigtable-clients-test + ref: ${{ matrix.test-version }} + path: cloud-bigtable-clients-test + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.py-version }} + - uses: actions/setup-go@v5 + with: + go-version: '>=1.20.2' + - run: chmod +x .kokoro/conformance.sh + - run: pip install -e . + name: "Install python-bigtable from HEAD" + - run: go version + - run: .kokoro/conformance.sh + name: "Run tests" + env: + CLIENT_TYPE: ${{ matrix.client-type }} + PYTHONUNBUFFERED: 1 + TEST_ARGS: ${{ matrix.test_args }} + PROXY_PORT: 9999 + diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index e97d89e48..2833fe98f 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -8,11 +8,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.10" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel @@ -24,11 +24,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.10" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 16d5a9e90..9a0598202 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -8,11 +8,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.13" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index c63242630..f2b78a536 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -8,11 +8,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.13" - name: Install nox run: | python -m pip install --upgrade setuptools 
pip wheel diff --git a/.github/workflows/system_emulated.yml b/.github/workflows/system_emulated.yml index e1f43fd40..d8bbbb639 100644 --- a/.github/workflows/system_emulated.yml +++ b/.github/workflows/system_emulated.yml @@ -7,20 +7,20 @@ on: jobs: run-systests: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: '3.8' + python-version: '3.13' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v1.1.0 + uses: google-github-actions/setup-gcloud@v2.1.1 - name: Install / run Nox run: | diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml index 8057a7691..dad646c6b 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unittest.yml @@ -5,15 +5,18 @@ on: name: unittest jobs: unit: - runs-on: ubuntu-latest + # TODO(https://github.com/googleapis/gapic-generator-python/issues/2303): use `ubuntu-latest` once this bug is fixed. + # Use ubuntu-22.04 until Python 3.7 is removed from the test matrix + # https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories + runs-on: ubuntu-22.04 strategy: matrix: - python: ['3.7', '3.8', '3.9', '3.10', '3.11'] + python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13', '3.14'] steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} - name: Install nox @@ -26,10 +29,11 @@ jobs: run: | nox -s unit-${{ matrix.python }} - name: Upload coverage results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: coverage-artifacts + name: coverage-artifact-${{ matrix.python }} path: .coverage-${{ matrix.python }} + include-hidden-files: true cover: runs-on: ubuntu-latest @@ -37,21 +41,21 @@ jobs: - unit steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.13" - name: Install coverage run: | python -m pip install --upgrade setuptools pip wheel python -m pip install coverage - name: Download coverage results - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: - name: coverage-artifacts path: .coverage-results/ - name: Report coverage results run: | - coverage combine .coverage-results/.coverage* - coverage report --show-missing --fail-under=100 + find .coverage-results -type f -name '*.zip' -exec unzip {} \; + coverage combine .coverage-results/**/.coverage* + coverage report --show-missing --fail-under=99 diff --git a/.gitignore b/.gitignore index b4243ced7..d083ea1dd 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,7 @@ docs.metadata # Virtual environment env/ +venv/ # Test logs coverage.xml diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..5fa9b1ed5 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "python-api-core"] + path = python-api-core + url = git@github.com:googleapis/python-api-core.git +[submodule "gapic-generator-fork"] + path = gapic-generator-fork + url = git@github.com:googleapis/gapic-generator-python.git diff --git a/.kokoro/build.sh b/.kokoro/build.sh index dec6b66a7..d41b45aa1 100755 --- 
a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,11 +15,13 @@ set -eo pipefail +CURRENT_DIR=$(dirname "${BASH_SOURCE[0]}") + if [[ -z "${PROJECT_ROOT:-}" ]]; then - PROJECT_ROOT="github/python-bigtable" + PROJECT_ROOT=$(realpath "${CURRENT_DIR}/..") fi -cd "${PROJECT_ROOT}" +pushd "${PROJECT_ROOT}" # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 @@ -28,17 +30,16 @@ export PYTHONUNBUFFERED=1 env | grep KOKORO # Setup service account credentials. -export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json +if [[ -f "${KOKORO_GFILE_DIR}/service-account.json" ]] +then + export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json +fi # Setup project id. -export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") - -# Remove old nox -python3 -m pip uninstall --yes --quiet nox-automation - -# Install nox -python3 -m pip install --upgrade --quiet nox -python3 -m nox --version +if [[ -f "${KOKORO_GFILE_DIR}/project-id.json" ]] +then + export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") +fi # If this is a continuous build, send the test log to the FlakyBot. # See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot. @@ -53,7 +54,7 @@ fi # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. if [[ -n "${NOX_SESSION:-}" ]]; then - python3 -m nox -s ${NOX_SESSION:-} + python3 -m nox -s ${NOX_SESSION:-} else - python3 -m nox + python3 -m nox fi diff --git a/.kokoro/conformance.sh b/.kokoro/conformance.sh new file mode 100644 index 000000000..fd585142e --- /dev/null +++ b/.kokoro/conformance.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +## cd to the parent directory, i.e. the root of the git repo +cd $(dirname $0)/.. + +# Build and start the proxy in a separate process +pushd test_proxy +nohup python test_proxy.py --port $PROXY_PORT --client_type=$CLIENT_TYPE & +proxyPID=$! +popd + +# Kill proxy on exit +function cleanup() { + echo "Cleanup testbench"; + kill $proxyPID +} +trap cleanup EXIT + +# Run the conformance test +echo "running tests with args: $TEST_ARGS" +pushd cloud-bigtable-clients-test/tests +eval "go test -v -proxy_addr=:$PROXY_PORT $TEST_ARGS" +RETURN_CODE=$? +popd + +echo "exiting with ${RETURN_CODE}" +exit ${RETURN_CODE} diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile deleted file mode 100644 index 8e39a2cc4..000000000 --- a/.kokoro/docker/docs/Dockerfile +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ubuntu:22.04 - -ENV DEBIAN_FRONTEND noninteractive - -# Ensure local Python is preferred over distribution Python. -ENV PATH /usr/local/bin:$PATH - -# Install dependencies. -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - apt-transport-https \ - build-essential \ - ca-certificates \ - curl \ - dirmngr \ - git \ - gpg-agent \ - graphviz \ - libbz2-dev \ - libdb5.3-dev \ - libexpat1-dev \ - libffi-dev \ - liblzma-dev \ - libreadline-dev \ - libsnappy-dev \ - libssl-dev \ - libsqlite3-dev \ - portaudio19-dev \ - python3-distutils \ - redis-server \ - software-properties-common \ - ssh \ - sudo \ - tcl \ - tcl-dev \ - tk \ - tk-dev \ - uuid-dev \ - wget \ - zlib1g-dev \ - && add-apt-repository universe \ - && apt-get update \ - && apt-get -y install jq \ - && apt-get clean autoclean \ - && apt-get autoremove -y \ - && rm -rf /var/lib/apt/lists/* \ - && rm -f /var/cache/apt/archives/*.deb - -###################### Install python 3.9.13 - -# Download python 3.9.13 -RUN wget https://www.python.org/ftp/python/3.9.13/Python-3.9.13.tgz - -# Extract files -RUN tar -xvf Python-3.9.13.tgz - -# Install python 3.9.13 -RUN ./Python-3.9.13/configure --enable-optimizations -RUN make altinstall - -###################### Install pip -RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3 /tmp/get-pip.py \ - && rm /tmp/get-pip.py - -# Test pip -RUN python3 -m pip - -CMD ["python3.8"] diff --git a/.kokoro/docker/docs/fetch_gpg_keys.sh b/.kokoro/docker/docs/fetch_gpg_keys.sh deleted file mode 100755 index d653dd868..000000000 --- a/.kokoro/docker/docs/fetch_gpg_keys.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A script to fetch gpg keys with retry. -# Avoid jinja parsing the file. -# - -function retry { - if [[ "${#}" -le 1 ]]; then - echo "Usage: ${0} retry_count commands.." - exit 1 - fi - local retries=${1} - local command="${@:2}" - until [[ "${retries}" -le 0 ]]; do - $command && return 0 - if [[ $? 
-ne 0 ]]; then - echo "command failed, retrying" - ((retries--)) - fi - done - return 1 -} - -# 3.6.9, 3.7.5 (Ned Deily) -retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ - 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D - -# 3.8.0 (Ɓukasz Langa) -retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ - E3FF2839C048B25C084DEBE9B26995E310250568 - -# diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg deleted file mode 100644 index 9b8937c57..000000000 --- a/.kokoro/docs/common.cfg +++ /dev/null @@ -1,66 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline_v2.sh" - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs" -} -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/publish-docs.sh" -} - -env_vars: { - key: "STAGING_BUCKET" - value: "docs-staging" -} - -env_vars: { - key: "V2_STAGING_BUCKET" - # Push google cloud library docs to the Cloud RAD bucket `docs-staging-v2` - value: "docs-staging-v2" -} - -# It will upload the docker image after successful builds. -env_vars: { - key: "TRAMPOLINE_IMAGE_UPLOAD" - value: "true" -} - -# It will always build the docker image. -env_vars: { - key: "TRAMPOLINE_DOCKERFILE" - value: ".kokoro/docker/docs/Dockerfile" -} - -# Fetch the token needed for reporting release status to GitHub -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "yoshi-automation-github-key" - } - } -} - -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "docuploader_service_account" - } - } -} \ No newline at end of file diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg deleted file mode 100644 index 001770ea6..000000000 --- a/.kokoro/docs/docs-presubmit.cfg +++ /dev/null @@ -1,28 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "STAGING_BUCKET" - value: "gcloud-python-test" -} - -env_vars: { - key: "V2_STAGING_BUCKET" - value: "gcloud-python-test" -} - -# We only upload the image in the main `docs` build. -env_vars: { - key: "TRAMPOLINE_IMAGE_UPLOAD" - value: "false" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/build.sh" -} - -# Only run this nox session. -env_vars: { - key: "NOX_SESSION" - value: "docs docfx" -} diff --git a/.kokoro/docs/docs.cfg b/.kokoro/docs/docs.cfg deleted file mode 100644 index 8f43917d9..000000000 --- a/.kokoro/docs/docs.cfg +++ /dev/null @@ -1 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh index 6f3972140..c435402f4 100755 --- a/.kokoro/populate-secrets.sh +++ b/.kokoro/populate-secrets.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC. +# Copyright 2024 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/.kokoro/presubmit/conformance.cfg b/.kokoro/presubmit/conformance.cfg new file mode 100644 index 000000000..4f44e8a78 --- /dev/null +++ b/.kokoro/presubmit/conformance.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "NOX_SESSION" + value: "conformance" +} diff --git a/.kokoro/presubmit/system-3.8.cfg b/.kokoro/presubmit/system-3.9.cfg similarity index 83% rename from .kokoro/presubmit/system-3.8.cfg rename to .kokoro/presubmit/system-3.9.cfg index f4bcee3db..b8ae66b37 100644 --- a/.kokoro/presubmit/system-3.8.cfg +++ b/.kokoro/presubmit/system-3.9.cfg @@ -3,5 +3,5 @@ # Only run this nox session. env_vars: { key: "NOX_SESSION" - value: "system-3.8" + value: "system-3.9" } \ No newline at end of file diff --git a/.kokoro/presubmit/system.cfg b/.kokoro/presubmit/system.cfg new file mode 100644 index 000000000..30956a3ab --- /dev/null +++ b/.kokoro/presubmit/system.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "system-3.10" +} diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh deleted file mode 100755 index 9eafe0be3..000000000 --- a/.kokoro/publish-docs.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -export PATH="${HOME}/.local/bin:${PATH}" - -# Install nox -python3 -m pip install --require-hashes -r .kokoro/requirements.txt -python3 -m nox --version - -# build docs -nox -s docs - -# create metadata -python3 -m docuploader create-metadata \ - --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3 setup.py --version) \ - --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3 setup.py --name) \ - --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ - --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ - --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) - -cat docs.metadata - -# upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" - - -# docfx yaml files -nox -s docfx - -# create metadata. 
-python3 -m docuploader create-metadata \ - --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3 setup.py --version) \ - --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3 setup.py --name) \ - --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ - --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ - --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) - -cat docs.metadata - -# upload docs -python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/.kokoro/release.sh b/.kokoro/release.sh deleted file mode 100755 index 2e1cbfa81..000000000 --- a/.kokoro/release.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -# Start the releasetool reporter -python3 -m pip install --require-hashes -r github/python-bigtable/.kokoro/requirements.txt -python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -# Move into the package, build the distribution and upload. -TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1") -cd github/python-bigtable -python3 setup.py sdist bdist_wheel -twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg deleted file mode 100644 index 2a8fd970c..000000000 --- a/.kokoro/release/common.cfg +++ /dev/null @@ -1,49 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline.sh" - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" -} -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/release.sh" -} - -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google-cloud-pypi-token-keystore-1" - } - } -} - -# Tokens needed to report release status back to GitHub -env_vars: { - key: "SECRET_MANAGER_KEYS" - value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" -} - -# Store the packages we uploaded to PyPI. That way, we have a record of exactly -# what we published, which we can use to generate SBOMs and attestations. 
-action { - define_artifacts { - regex: "github/python-bigtable/**/*.tar.gz" - strip_prefix: "github/python-bigtable" - } -} diff --git a/.kokoro/release/release.cfg b/.kokoro/release/release.cfg deleted file mode 100644 index 8f43917d9..000000000 --- a/.kokoro/release/release.cfg +++ /dev/null @@ -1 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/requirements.in b/.kokoro/requirements.in deleted file mode 100644 index ec867d9fd..000000000 --- a/.kokoro/requirements.in +++ /dev/null @@ -1,10 +0,0 @@ -gcp-docuploader -gcp-releasetool>=1.10.5 # required for compatibility with cryptography>=39.x -importlib-metadata -typing-extensions -twine -wheel -setuptools -nox>=2022.11.21 # required to remove dependency on py -charset-normalizer<3 -click<8.1.0 diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt deleted file mode 100644 index 029bd342d..000000000 --- a/.kokoro/requirements.txt +++ /dev/null @@ -1,496 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --allow-unsafe --generate-hashes requirements.in -# -argcomplete==2.0.0 \ - --hash=sha256:6372ad78c89d662035101418ae253668445b391755cfe94ea52f1b9d22425b20 \ - --hash=sha256:cffa11ea77999bb0dd27bb25ff6dc142a6796142f68d45b1a26b11f58724561e - # via nox -attrs==22.1.0 \ - --hash=sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6 \ - --hash=sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c - # via gcp-releasetool -bleach==5.0.1 \ - --hash=sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a \ - --hash=sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c - # via readme-renderer -cachetools==5.2.0 \ - --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ - --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db - # via google-auth -certifi==2023.7.22 \ - --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ - --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 - # via requests -cffi==1.15.1 \ - --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ - --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ - --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ - --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ - --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ - --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ - --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ - --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ - --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ - --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ - --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ - --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ - --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ - --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ - --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ - --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ - 
--hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ - --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ - --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ - --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ - --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ - --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ - --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ - --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ - --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ - --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ - --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ - --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ - --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ - --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ - --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ - --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ - --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ - --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ - --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ - --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ - --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ - --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ - --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ - --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ - --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ - --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ - --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ - --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ - --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ - --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ - --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ - --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ - --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ - --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ - --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ - --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ - --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ - --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ - --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ - --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ - --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ - --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ - 
--hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ - --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ - --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ - --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ - --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ - --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 - # via cryptography -charset-normalizer==2.1.1 \ - --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ - --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f - # via - # -r requirements.in - # requests -click==8.0.4 \ - --hash=sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1 \ - --hash=sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb - # via - # -r requirements.in - # gcp-docuploader - # gcp-releasetool -colorlog==6.7.0 \ - --hash=sha256:0d33ca236784a1ba3ff9c532d4964126d8a2c44f1f0cb1d2b0728196f512f662 \ - --hash=sha256:bd94bd21c1e13fac7bd3153f4bc3a7dc0eb0974b8bc2fdf1a989e474f6e582e5 - # via - # gcp-docuploader - # nox -commonmark==0.9.1 \ - --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ - --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 - # via rich -cryptography==41.0.3 \ - --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \ - --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \ - --hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \ - --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \ - --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \ - --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \ - --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \ - --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \ - --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \ - --hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \ - --hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \ - --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \ - --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \ - --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \ - --hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \ - --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \ - --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \ - --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \ - --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \ - --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \ - --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \ - --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \ - --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de - # via - # gcp-releasetool - # secretstorage -distlib==0.3.6 \ - --hash=sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46 \ - 
--hash=sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e - # via virtualenv -docutils==0.19 \ - --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ - --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc - # via readme-renderer -filelock==3.8.0 \ - --hash=sha256:55447caa666f2198c5b6b13a26d2084d26fa5b115c00d065664b2124680c4edc \ - --hash=sha256:617eb4e5eedc82fc5f47b6d61e4d11cb837c56cb4544e39081099fa17ad109d4 - # via virtualenv -gcp-docuploader==0.6.4 \ - --hash=sha256:01486419e24633af78fd0167db74a2763974765ee8078ca6eb6964d0ebd388af \ - --hash=sha256:70861190c123d907b3b067da896265ead2eeb9263969d6955c9e0bb091b5ccbf - # via -r requirements.in -gcp-releasetool==1.10.5 \ - --hash=sha256:174b7b102d704b254f2a26a3eda2c684fd3543320ec239baf771542a2e58e109 \ - --hash=sha256:e29d29927fe2ca493105a82958c6873bb2b90d503acac56be2c229e74de0eec9 - # via -r requirements.in -google-api-core==2.10.2 \ - --hash=sha256:10c06f7739fe57781f87523375e8e1a3a4674bf6392cd6131a3222182b971320 \ - --hash=sha256:34f24bd1d5f72a8c4519773d99ca6bf080a6c4e041b4e9f024fe230191dda62e - # via - # google-cloud-core - # google-cloud-storage -google-auth==2.14.1 \ - --hash=sha256:ccaa901f31ad5cbb562615eb8b664b3dd0bf5404a67618e642307f00613eda4d \ - --hash=sha256:f5d8701633bebc12e0deea4df8abd8aff31c28b355360597f7f2ee60f2e4d016 - # via - # gcp-releasetool - # google-api-core - # google-cloud-core - # google-cloud-storage -google-cloud-core==2.3.2 \ - --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ - --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a - # via google-cloud-storage -google-cloud-storage==2.6.0 \ - --hash=sha256:104ca28ae61243b637f2f01455cc8a05e8f15a2a18ced96cb587241cdd3820f5 \ - --hash=sha256:4ad0415ff61abdd8bb2ae81c1f8f7ec7d91a1011613f2db87c614c550f97bfe9 - # via gcp-docuploader -google-crc32c==1.5.0 \ - --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ - --hash=sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876 \ - --hash=sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c \ - --hash=sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289 \ - --hash=sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298 \ - --hash=sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02 \ - --hash=sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f \ - --hash=sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2 \ - --hash=sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a \ - --hash=sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb \ - --hash=sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210 \ - --hash=sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5 \ - --hash=sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee \ - --hash=sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c \ - --hash=sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a \ - --hash=sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314 \ - --hash=sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd \ - --hash=sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65 \ - --hash=sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37 \ - 
--hash=sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4 \ - --hash=sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13 \ - --hash=sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894 \ - --hash=sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31 \ - --hash=sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e \ - --hash=sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709 \ - --hash=sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740 \ - --hash=sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc \ - --hash=sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d \ - --hash=sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c \ - --hash=sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c \ - --hash=sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d \ - --hash=sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906 \ - --hash=sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61 \ - --hash=sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57 \ - --hash=sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c \ - --hash=sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a \ - --hash=sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438 \ - --hash=sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946 \ - --hash=sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7 \ - --hash=sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96 \ - --hash=sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091 \ - --hash=sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae \ - --hash=sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d \ - --hash=sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88 \ - --hash=sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2 \ - --hash=sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd \ - --hash=sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541 \ - --hash=sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728 \ - --hash=sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178 \ - --hash=sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968 \ - --hash=sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346 \ - --hash=sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8 \ - --hash=sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93 \ - --hash=sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7 \ - --hash=sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273 \ - --hash=sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462 \ - --hash=sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94 \ - --hash=sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd \ - --hash=sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e \ - --hash=sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57 \ - --hash=sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b \ - 
--hash=sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9 \ - --hash=sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a \ - --hash=sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100 \ - --hash=sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325 \ - --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ - --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ - --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 - # via google-resumable-media -google-resumable-media==2.4.0 \ - --hash=sha256:2aa004c16d295c8f6c33b2b4788ba59d366677c0a25ae7382436cb30f776deaa \ - --hash=sha256:8d5518502f92b9ecc84ac46779bd4f09694ecb3ba38a3e7ca737a86d15cbca1f - # via google-cloud-storage -googleapis-common-protos==1.57.0 \ - --hash=sha256:27a849d6205838fb6cc3c1c21cb9800707a661bb21c6ce7fb13e99eb1f8a0c46 \ - --hash=sha256:a9f4a1d7f6d9809657b7f1316a1aa527f6664891531bcfcc13b6696e685f443c - # via google-api-core -idna==3.4 \ - --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ - --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 - # via requests -importlib-metadata==5.0.0 \ - --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \ - --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43 - # via - # -r requirements.in - # keyring - # twine -jaraco-classes==3.2.3 \ - --hash=sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158 \ - --hash=sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a - # via keyring -jeepney==0.8.0 \ - --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ - --hash=sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755 - # via - # keyring - # secretstorage -jinja2==3.1.2 \ - --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ - --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 - # via gcp-releasetool -keyring==23.11.0 \ - --hash=sha256:3dd30011d555f1345dec2c262f0153f2f0ca6bca041fb1dc4588349bb4c0ac1e \ - --hash=sha256:ad192263e2cdd5f12875dedc2da13534359a7e760e77f8d04b50968a821c2361 - # via - # gcp-releasetool - # twine -markupsafe==2.1.1 \ - --hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \ - --hash=sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88 \ - --hash=sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5 \ - --hash=sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7 \ - --hash=sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a \ - --hash=sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603 \ - --hash=sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1 \ - --hash=sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135 \ - --hash=sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247 \ - --hash=sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6 \ - --hash=sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601 \ - --hash=sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77 \ - --hash=sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02 \ - --hash=sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e \ - 
--hash=sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63 \ - --hash=sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f \ - --hash=sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980 \ - --hash=sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b \ - --hash=sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812 \ - --hash=sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff \ - --hash=sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96 \ - --hash=sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1 \ - --hash=sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925 \ - --hash=sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a \ - --hash=sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6 \ - --hash=sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e \ - --hash=sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f \ - --hash=sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4 \ - --hash=sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f \ - --hash=sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3 \ - --hash=sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c \ - --hash=sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a \ - --hash=sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417 \ - --hash=sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a \ - --hash=sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a \ - --hash=sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 \ - --hash=sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452 \ - --hash=sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933 \ - --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ - --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 - # via jinja2 -more-itertools==9.0.0 \ - --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ - --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab - # via jaraco-classes -nox==2022.11.21 \ - --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \ - --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684 - # via -r requirements.in -packaging==21.3 \ - --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ - --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 - # via - # gcp-releasetool - # nox -pkginfo==1.8.3 \ - --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \ - --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c - # via twine -platformdirs==2.5.4 \ - --hash=sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7 \ - --hash=sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10 - # via virtualenv -protobuf==3.20.3 \ - --hash=sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7 \ - --hash=sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c \ - --hash=sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2 \ - 
--hash=sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b \ - --hash=sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050 \ - --hash=sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9 \ - --hash=sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7 \ - --hash=sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454 \ - --hash=sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480 \ - --hash=sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469 \ - --hash=sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c \ - --hash=sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e \ - --hash=sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db \ - --hash=sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905 \ - --hash=sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b \ - --hash=sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86 \ - --hash=sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4 \ - --hash=sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402 \ - --hash=sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7 \ - --hash=sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4 \ - --hash=sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99 \ - --hash=sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee - # via - # gcp-docuploader - # gcp-releasetool - # google-api-core -pyasn1==0.4.8 \ - --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ - --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba - # via - # pyasn1-modules - # rsa -pyasn1-modules==0.2.8 \ - --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \ - --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74 - # via google-auth -pycparser==2.21 \ - --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ - --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 - # via cffi -pygments==2.15.0 \ - --hash=sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094 \ - --hash=sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500 - # via - # readme-renderer - # rich -pyjwt==2.6.0 \ - --hash=sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd \ - --hash=sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14 - # via gcp-releasetool -pyparsing==3.0.9 \ - --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ - --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc - # via packaging -pyperclip==1.8.2 \ - --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57 - # via gcp-releasetool -python-dateutil==2.8.2 \ - --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ - --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 - # via gcp-releasetool -readme-renderer==37.3 \ - --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ - --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 - # via twine -requests==2.31.0 \ - --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ - 
--hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 - # via - # gcp-releasetool - # google-api-core - # google-cloud-storage - # requests-toolbelt - # twine -requests-toolbelt==0.10.1 \ - --hash=sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7 \ - --hash=sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d - # via twine -rfc3986==2.0.0 \ - --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ - --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c - # via twine -rich==12.6.0 \ - --hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \ - --hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0 - # via twine -rsa==4.9 \ - --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ - --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21 - # via google-auth -secretstorage==3.3.3 \ - --hash=sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77 \ - --hash=sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99 - # via keyring -six==1.16.0 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 - # via - # bleach - # gcp-docuploader - # google-auth - # python-dateutil -twine==4.0.1 \ - --hash=sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e \ - --hash=sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0 - # via -r requirements.in -typing-extensions==4.4.0 \ - --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ - --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e - # via -r requirements.in -urllib3==1.26.12 \ - --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ - --hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997 - # via - # requests - # twine -virtualenv==20.16.7 \ - --hash=sha256:8691e3ff9387f743e00f6bb20f70121f5e4f596cae754531f2b3b3a1b1ac696e \ - --hash=sha256:efd66b00386fdb7dbe4822d172303f40cd05e50e01740b19ea42425cbe653e29 - # via nox -webencodings==0.5.1 \ - --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ - --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 - # via bleach -wheel==0.38.4 \ - --hash=sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac \ - --hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8 - # via -r requirements.in -zipp==3.10.0 \ - --hash=sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1 \ - --hash=sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8 - # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -setuptools==65.5.1 \ - --hash=sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31 \ - --hash=sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f - # via -r requirements.in diff --git a/.kokoro/samples/python3.12/common.cfg b/.kokoro/samples/python3.12/common.cfg new file mode 100644 index 000000000..34e0a95f3 --- /dev/null +++ b/.kokoro/samples/python3.12/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# 
Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.12" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-312" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.12/continuous.cfg b/.kokoro/samples/python3.12/continuous.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.12/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.12/periodic-head.cfg b/.kokoro/samples/python3.12/periodic-head.cfg new file mode 100644 index 000000000..be25a34f9 --- /dev/null +++ b/.kokoro/samples/python3.12/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.12/periodic.cfg b/.kokoro/samples/python3.12/periodic.cfg new file mode 100644 index 000000000..71cd1e597 --- /dev/null +++ b/.kokoro/samples/python3.12/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/.kokoro/samples/python3.12/presubmit.cfg b/.kokoro/samples/python3.12/presubmit.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.12/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.13/common.cfg b/.kokoro/samples/python3.13/common.cfg new file mode 100644 index 000000000..15ba807cb --- /dev/null +++ b/.kokoro/samples/python3.13/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.13" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-313" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. 
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" diff --git a/.kokoro/samples/python3.13/continuous.cfg b/.kokoro/samples/python3.13/continuous.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.13/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.13/periodic-head.cfg b/.kokoro/samples/python3.13/periodic-head.cfg new file mode 100644 index 000000000..be25a34f9 --- /dev/null +++ b/.kokoro/samples/python3.13/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.13/periodic.cfg b/.kokoro/samples/python3.13/periodic.cfg new file mode 100644 index 000000000..71cd1e597 --- /dev/null +++ b/.kokoro/samples/python3.13/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/.kokoro/samples/python3.13/presubmit.cfg b/.kokoro/samples/python3.13/presubmit.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.13/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.14/common.cfg b/.kokoro/samples/python3.14/common.cfg new file mode 100644 index 000000000..a9ea06119 --- /dev/null +++ b/.kokoro/samples/python3.14/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.14" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-314" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh" diff --git a/.kokoro/samples/python3.14/continuous.cfg b/.kokoro/samples/python3.14/continuous.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.14/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.14/periodic-head.cfg b/.kokoro/samples/python3.14/periodic-head.cfg new file mode 100644 index 000000000..be25a34f9 --- /dev/null +++ b/.kokoro/samples/python3.14/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.14/periodic.cfg b/.kokoro/samples/python3.14/periodic.cfg new file mode 100644 index 000000000..71cd1e597 --- /dev/null +++ b/.kokoro/samples/python3.14/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/.kokoro/samples/python3.14/presubmit.cfg b/.kokoro/samples/python3.14/presubmit.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.14/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh index 63ac41dfa..e9d8bd79a 100755 --- a/.kokoro/test-samples-against-head.sh +++ b/.kokoro/test-samples-against-head.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh index 5a0f5fab6..53e365bc4 100755 --- a/.kokoro/test-samples-impl.sh +++ b/.kokoro/test-samples-impl.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,7 +33,8 @@ export PYTHONUNBUFFERED=1 env | grep KOKORO # Install nox -python3.9 -m pip install --upgrade --quiet nox +# `virtualenv==20.26.6` is added for Python 3.7 compatibility +python3.9 -m pip install --upgrade --quiet nox virtualenv==20.26.6 # Use secrets acessor service account to get secrets if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index 50b35a48c..7933d8201 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh index d85b1f267..48f796997 100755 --- a/.kokoro/trampoline.sh +++ b/.kokoro/trampoline.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh index 59a7cf3a9..d03f92dfc 100755 --- a/.kokoro/trampoline_v2.sh +++ b/.kokoro/trampoline_v2.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,8 +26,8 @@ # To run this script, first download few files from gcs to /dev/shm. # (/dev/shm is passed into the container as KOKORO_GFILE_DIR). # -# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm -# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm +# gcloud storage cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm +# gcloud storage cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm # # Then run the script. # .kokoro/trampoline_v2.sh diff --git a/.librarian/generator-input/.repo-metadata.json b/.librarian/generator-input/.repo-metadata.json new file mode 100644 index 000000000..9de4b5f92 --- /dev/null +++ b/.librarian/generator-input/.repo-metadata.json @@ -0,0 +1,80 @@ +{ + "name": "bigtable", + "name_pretty": "Cloud Bigtable", + "product_documentation": "https://cloud.google.com/bigtable", + "client_documentation": "https://cloud.google.com/python/docs/reference/bigtable/latest", + "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", + "release_level": "stable", + "language": "python", + "library_type": "GAPIC_COMBO", + "repo": "googleapis/python-bigtable", + "distribution_name": "google-cloud-bigtable", + "api_id": "bigtable.googleapis.com", + "requires_billing": true, + "samples": [ + { + "name": "Hello World in Cloud Bigtable", + "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello" + }, + { + "name": "Hello World using HappyBase", + "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello_happybase" + }, + { + "name": "cbt Command Demonstration", + "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt", + "file": "instanceadmin.py", + "runnable": true, + "custom_content": "
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
", + "override_path": "instanceadmin" + }, + { + "name": "Metric Scaler", + "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.", + "file": "metricscaler.py", + "runnable": true, + "custom_content": "
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
", + "override_path": "metricscaler" + }, + { + "name": "Quickstart", + "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id 


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
", + "override_path": "quickstart" + }, + { + "name": "Quickstart using HappyBase", + "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "tableadmin" + } + ], + "default_version": "v2", + "codeowner_team": "@googleapis/api-bigtable @googleapis/api-bigtable-partners", + "api_shortname": "bigtable" +} diff --git a/.librarian/generator-input/librarian.py b/.librarian/generator-input/librarian.py new file mode 100644 index 000000000..5b943d24b --- /dev/null +++ b/.librarian/generator-input/librarian.py @@ -0,0 +1,266 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This script is used to synthesize generated parts of this library.""" + +from pathlib import Path +import re +import textwrap +from typing import List, Optional + +import synthtool as s +from synthtool import gcp, _tracked_paths +from synthtool.languages import python +from synthtool.sources import templates + +common = gcp.CommonTemplates() + +# These flags are needed because certain post-processing operations +# append things after a certain line of text, and can infinitely loop +# in a Github PR. We use these flags to only do those operations +# on fresh copies of files found in googleapis-gen, and not on user-submitted +# changes. +is_fresh_admin_copy = False +is_fresh_admin_v2_copy = False +is_fresh_admin_docs_copy = False + +for library in s.get_staging_dirs("v2"): + s.move(library / "google/cloud/bigtable_v2") + is_fresh_admin_copy = \ + s.move(library / "google/cloud/bigtable_admin") + is_fresh_admin_v2_copy = \ + s.move(library / "google/cloud/bigtable_admin_v2") + s.move(library / "tests") + s.move(library / "samples") + s.move(library / "scripts") + is_fresh_admin_docs_copy = \ + s.move(library / "docs/bigtable_admin_v2", destination="docs/admin_client") + +s.remove_staging_dirs() + +# ---------------------------------------------------------------------------- +# Add templated files +# ---------------------------------------------------------------------------- +templated_files = common.py_library( + samples=True, # set to True only if there are samples + split_system_tests=True, + microgenerator=True, + cov_level=99, + system_test_external_dependencies=[ + "pytest-asyncio==0.21.2", + ], + system_test_python_versions=["3.9"], + unit_test_python_versions=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"], + default_python_version="3.13", +) + +s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/**", ".kokoro/**", "noxfile.py", "renovate.json"]) + + +s.shell.run(["nox", "-s", "blacken"], hide_output=False) + +# ---------------------------------------------------------------------------- +# Always supply app_profile_id in routing headers: https://github.com/googleapis/python-bigtable/pull/1109 +# TODO: remove after backend no longer requires empty strings +# ---------------------------------------------------------------------------- +for file in ["async_client.py", "client.py"]: + s.replace( + f"google/cloud/bigtable_v2/services/bigtable/{file}", + "if request.app_profile_id:", + "if True: # always attach app_profile_id, even if empty string" + ) +# fix tests +s.replace( + 
"tests/unit/gapic/bigtable_v2/test_bigtable.py", + 'assert \(\n\s*gapic_v1\.routing_header\.to_grpc_metadata\(expected_headers\) in kw\["metadata"\]\n.*', + """# assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])""" +) +s.replace( + "tests/unit/gapic/bigtable_v2/test_bigtable.py", + 'expected_headers = {"name": "projects/sample1/instances/sample2"}', + """expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + }""" +) +s.replace( + "tests/unit/gapic/bigtable_v2/test_bigtable.py", + """ + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } +""", + """ + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } +""" +) + +# ---------------------------------------------------------------------------- +# Samples templates +# ---------------------------------------------------------------------------- + +python.py_samples(skip_readmes=True) + +# -------------------------------------------------------------------------- +# Admin Overlay work +# -------------------------------------------------------------------------- + +# Add overlay imports to top level __init__.py files in admin_v2 and admin at the end +# of each file, after the __all__ definition. These changes should only be done on fresh +# copies of the __init__.py files. +def add_overlay_to_init_py(init_py_location, import_statements, should_add): + if should_add: + s.replace( + init_py_location, + r"(?s)(^__all__ = \(.*\)$)", + r"\1\n\n" + import_statements + ) + +add_overlay_to_init_py( + "google/cloud/bigtable_admin_v2/__init__.py", + """from .overlay import * # noqa: F403\n +__all__ += overlay.__all__ # noqa: F405""", + is_fresh_admin_v2_copy, +) + +add_overlay_to_init_py( + "google/cloud/bigtable_admin/__init__.py", + """import google.cloud.bigtable_admin_v2.overlay # noqa: F401 +from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403 + +__all__ += google.cloud.bigtable_admin_v2.overlay.__all__""", + is_fresh_admin_copy, +) + +# Replace all instances of BaseBigtableTableAdminClient/BaseBigtableAdminAsyncClient +# in samples and docstrings with BigtableTableAdminClient/BigtableTableAdminAsyncClient +s.replace( + [ + "google/cloud/bigtable_admin_v2/services/*/client.py", + "google/cloud/bigtable_admin_v2/services/*/async_client.py", + "samples/generated_samples/bigtableadmin_v2_*.py" + ], + r"client = bigtable_admin_v2\.Base(BigtableTableAdmin(Async)?Client\(\))", + r"client = bigtable_admin_v2.\1" +) + +# Fix an improperly formatted table that breaks nox -s docs. 
+s.replace( + "google/cloud/bigtable_admin_v2/types/table.py", + """ For example, if \\\\_key = + "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" with the following + schema: \\{ fields \\{ field_name: "id" type \\{ string \\{ + encoding: utf8_bytes \\{\\} \\} \\} \\} fields \\{ field_name: "date" + type \\{ string \\{ encoding: utf8_bytes \\{\\} \\} \\} \\} fields \\{ + field_name: "product_code" type \\{ int64 \\{ encoding: + big_endian_bytes \\{\\} \\} \\} \\} encoding \\{ delimited_bytes \\{ + delimiter: "#" \\} \\} \\} + + \\| The decoded key parts would be: id = "some_id", date = + "2024-04-30", product_code = 1245427 The query "SELECT + \\\\_key, product_code FROM table" will return two columns: + /------------------------------------------------------ + \\| \\\\\\| \\\\_key \\\\\\| product_code \\\\\\| \\\\\\| + --------------------------------------\\|--------------\\\\\\| \\\\\\| + "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" \\\\\\| 1245427 \\\\\\| + ------------------------------------------------------/ +""", + textwrap.indent( + """For example, if \\\\_key = +"some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" with the following +schema: + +.. code-block:: + + { + fields { + field_name: "id" + type { string { encoding: utf8_bytes {} } } + } + fields { + field_name: "date" + type { string { encoding: utf8_bytes {} } } + } + fields { + field_name: "product_code" + type { int64 { encoding: big_endian_bytes {} } } + } + encoding { delimited_bytes { delimiter: "#" } } + } + +The decoded key parts would be: +id = "some_id", date = "2024-04-30", product_code = 1245427 +The query "SELECT \\\\_key, product_code FROM table" will return +two columns: + ++========================================+==============+ +| \\\\_key | product_code | ++========================================+==============+ +| "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" | 1245427 | ++----------------------------------------+--------------+ +""", + " " * 12, + ), +) + +# These changes should only be done on fresh copies of the .rst files +# from googleapis-gen. +if is_fresh_admin_docs_copy: + # Change the subpackage for clients with overridden internal methods in them + # from service to overlay.service. + s.replace( + "docs/admin_client/bigtable_table_admin.rst", + r"^\.\. automodule:: google\.cloud\.bigtable_admin_v2\.services\.bigtable_table_admin$", + ".. automodule:: google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin" + ) + + # Add overlay types to types documentation + s.replace( + "docs/admin_client/types_.rst", + r"""(\.\. automodule:: google\.cloud\.bigtable_admin_v2\.types + :members: + :show-inheritance:) +""", + r"""\1 + +.. automodule:: google.cloud.bigtable_admin_v2.overlay.types + :members: + :show-inheritance: +""" + ) + +# These changes should only be done on a fresh copy of table.py +# from googleapis-gen. 
+if is_fresh_admin_v2_copy: + # Add the oneof_message import into table.py for GcRule + s.replace( + "google/cloud/bigtable_admin_v2/types/table.py", + r"^(from google\.cloud\.bigtable_admin_v2\.types import .+)$", + r"""\1 +from google.cloud.bigtable_admin_v2.utils import oneof_message""", + ) + + # Re-subclass GcRule in table.py + s.replace( + "google/cloud/bigtable_admin_v2/types/table.py", + r"class GcRule\(proto\.Message\)\:", + "class GcRule(oneof_message.OneofMessage):", + ) diff --git a/.librarian/generator-input/noxfile.py b/.librarian/generator-input/noxfile.py new file mode 100644 index 000000000..d1176966e --- /dev/null +++ b/.librarian/generator-input/noxfile.py @@ -0,0 +1,569 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! + +from __future__ import absolute_import + +import os +import pathlib +import re +import shutil +from typing import Dict, List +import warnings + +import nox + +FLAKE8_VERSION = "flake8==6.1.0" +BLACK_VERSION = "black[jupyter]==23.3.0" +ISORT_VERSION = "isort==5.11.0" +LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"] + +DEFAULT_PYTHON_VERSION = "3.13" + +UNIT_TEST_PYTHON_VERSIONS: List[str] = [ + "3.7", + "3.8", + "3.9", + "3.10", + "3.11", + "3.12", + "3.13", + "3.14", +] +UNIT_TEST_STANDARD_DEPENDENCIES = [ + "mock", + "asyncmock", + "pytest", + "pytest-cov", + "pytest-asyncio", + BLACK_VERSION, + "autoflake", +] +UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_DEPENDENCIES: List[str] = [] +UNIT_TEST_EXTRAS: List[str] = [] +UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.10", "3.14"] +SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ + "mock", + "pytest", + "google-cloud-testutils", +] +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [ + "pytest-asyncio==0.21.2", + BLACK_VERSION, + "pyyaml==6.0.2", +] +SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTRAS: List[str] = [] +SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +# 'docfx' is excluded since it only needs to run in 'docs-presubmit' +nox.options.sessions = [ + "unit-3.10", + "unit-3.11", + "unit-3.12", + "unit-3.13", + "unit-3.14", + "system_emulated", + "system", + "mypy", + "cover", + "lint", + "lint_setup_py", + "blacken", + "docs", + "format", +] + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. 
+ """ + session.install(FLAKE8_VERSION, BLACK_VERSION) + session.run( + "black", + "--check", + *LINT_PATHS, + ) + session.run("flake8", "google", "tests") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *LINT_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def format(session): + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run( + "isort", + "--fss", + *LINT_PATHS, + ) + session.run( + "black", + *LINT_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def mypy(session): + """Verify type hints are mypy compatible.""" + session.install("-e", ".") + session.install( + "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests" + ) + session.install("google-cloud-testutils") + session.run("mypy", "-p", "google.cloud.bigtable.data") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("setuptools", "docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") + + +def install_unittest_dependencies(session, *constraints): + standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES + session.install(*standard_deps, *constraints) + + if UNIT_TEST_EXTERNAL_DEPENDENCIES: + warnings.warn( + "'unit_test_external_dependencies' is deprecated. Instead, please " + "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.", + DeprecationWarning, + ) + session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints) + + if UNIT_TEST_LOCAL_DEPENDENCIES: + session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints) + + if UNIT_TEST_EXTRAS_BY_PYTHON: + extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, []) + elif UNIT_TEST_EXTRAS: + extras = UNIT_TEST_EXTRAS + else: + extras = [] + + if extras: + session.install("-e", f".[{','.join(extras)}]", *constraints) + else: + session.install("-e", ".", *constraints) + + +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) +@nox.parametrize( + "protobuf_implementation", + ["python", "upb", "cpp"], +) +def unit(session, protobuf_implementation): + # Install all test dependencies, then install this package in-place. + py_version = tuple([int(v) for v in session.python.split(".")]) + if protobuf_implementation == "cpp" and py_version >= (3, 11): + session.skip("cpp implementation is not supported in python 3.11+") + + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + install_unittest_dependencies(session, "-c", constraints_path) + + # TODO(https://github.com/googleapis/synthtool/issues/1976): + # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped. + # The 'cpp' implementation requires Protobuf<4. + if protobuf_implementation == "cpp": + session.install("protobuf<4") + + # Run py.test against the unit tests. 
+ session.run( + "py.test", + "--quiet", + f"--junitxml=unit_{session.python}_sponge_log.xml", + "--cov=google", + "--cov=tests/unit", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + os.path.join("tests", "unit"), + *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + + +def install_systemtest_dependencies(session, *constraints): + # Use pre-release gRPC for system tests. + # Exclude version 1.52.0rc1 which has a known issue. + # See https://github.com/grpc/grpc/issues/32163 + session.install("--pre", "grpcio!=1.52.0rc1") + + session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_EXTERNAL_DEPENDENCIES: + session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_LOCAL_DEPENDENCIES: + session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_DEPENDENCIES: + session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_EXTRAS_BY_PYTHON: + extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, []) + elif SYSTEM_TEST_EXTRAS: + extras = SYSTEM_TEST_EXTRAS + else: + extras = [] + + if extras: + session.install("-e", f".[{','.join(extras)}]", *constraints) + else: + session.install("-e", ".", *constraints) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def system_emulated(session): + import subprocess + import signal + + try: + subprocess.call(["gcloud", "--version"]) + except OSError: + session.skip("gcloud not found but required for emulator support") + + # Currently, CI/CD doesn't have beta component of gcloud. + subprocess.call(["gcloud", "components", "install", "beta", "bigtable"]) + + hostport = "localhost:8789" + session.env["BIGTABLE_EMULATOR_HOST"] = hostport + + p = subprocess.Popen( + ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport] + ) + + try: + system(session) + finally: + # Stop Emulator + os.killpg(os.getpgid(p.pid), signal.SIGKILL) + + +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +@nox.parametrize("client_type", ["async", "sync", "legacy"]) +def conformance(session, client_type): + # install dependencies + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + install_unittest_dependencies(session, "-c", constraints_path) + with session.chdir("test_proxy"): + # download the conformance test suite + session.run( + "bash", + "-e", + "run_tests.sh", + external=True, + env={"CLIENT_TYPE": client_type}, + ) + + +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +def system(session): + """Run the system test suite.""" + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + system_test_path = os.path.join("tests", "system.py") + system_test_folder_path = os.path.join("tests", "system") + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") + # Install pyopenssl for mTLS testing. + if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": + session.install("pyopenssl") + + system_test_exists = os.path.exists(system_test_path) + system_test_folder_exists = os.path.exists(system_test_folder_path) + # Sanity check: only run tests if found. 
+ if not system_test_exists and not system_test_folder_exists: + session.skip("System tests were not found") + + install_systemtest_dependencies(session, "-c", constraints_path) + + # Run py.test against the system tests. + if system_test_exists: + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + ) + if system_test_folder_exists: + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. + + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=99") + + session.run("coverage", "erase") + + +@nox.session(python="3.10") +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install( + # We need to pin to specific versions of the `sphinxcontrib-*` packages + # which still support sphinx 4.x. + # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 + # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. + "sphinxcontrib-applehelp==1.0.4", + "sphinxcontrib-devhelp==1.0.2", + "sphinxcontrib-htmlhelp==2.0.1", + "sphinxcontrib-qthelp==1.0.3", + "sphinxcontrib-serializinghtml==1.1.5", + "sphinx==4.5.0", + "alabaster", + "recommonmark", + ) + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python="3.10") +def docfx(session): + """Build the docfx yaml files for this library.""" + + session.install("-e", ".") + session.install( + # We need to pin to specific versions of the `sphinxcontrib-*` packages + # which still support sphinx 4.x. + # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 + # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. 
+ "sphinxcontrib-applehelp==1.0.4", + "sphinxcontrib-devhelp==1.0.2", + "sphinxcontrib-htmlhelp==2.0.1", + "sphinxcontrib-qthelp==1.0.3", + "sphinxcontrib-serializinghtml==1.1.5", + "gcp-sphinx-docfx-yaml", + "alabaster", + "recommonmark", + ) + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-T", # show full traceback on exception + "-N", # no colors + "-D", + ( + "extensions=sphinx.ext.autodoc," + "sphinx.ext.autosummary," + "docfx_yaml.extension," + "sphinx.ext.intersphinx," + "sphinx.ext.coverage," + "sphinx.ext.napoleon," + "sphinx.ext.todo," + "sphinx.ext.viewcode," + "recommonmark" + ), + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + # Customization: Add extra sections to the table of contents for the Classic vs Async clients + session.install("pyyaml") + session.run("python", "docs/scripts/patch_devsite_toc.py") + + +@nox.session(python="3.14") +@nox.parametrize( + "protobuf_implementation", + ["python", "upb", "cpp"], +) +def prerelease_deps(session, protobuf_implementation): + """Run all tests with prerelease versions of dependencies installed.""" + + py_version = tuple([int(v) for v in session.python.split(".")]) + if protobuf_implementation == "cpp" and py_version >= (3, 11): + session.skip("cpp implementation is not supported in python 3.11+") + + # Install all dependencies + session.install("-e", ".[all, tests, tracing]") + unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES + session.install(*unit_deps_all) + system_deps_all = ( + SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES + ) + session.install(*system_deps_all) + + # Because we test minimum dependency versions on the minimum Python + # version, the first version we test with in the unit tests sessions has a + # constraints file containing all dependencies and extras. + with open( + CURRENT_DIRECTORY + / "testing" + / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt", + encoding="utf-8", + ) as constraints_file: + constraints_text = constraints_file.read() + + # Ignore leading whitespace and comment lines. + constraints_deps = [ + match.group(1) + for match in re.finditer( + r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE + ) + ] + + session.install(*constraints_deps) + + prerel_deps = [ + "protobuf", + # dependency of grpc + "six", + "grpc-google-iam-v1", + "googleapis-common-protos", + "grpcio", + "grpcio-status", + "google-api-core", + "google-auth", + "proto-plus", + "google-cloud-testutils", + # dependencies of google-cloud-testutils" + "click", + ] + + for dep in prerel_deps: + session.install("--pre", "--no-deps", "--upgrade", dep) + + # Remaining dependencies + other_deps = [ + "requests", + "cryptography", + ] + session.install(*other_deps) + + # Print out prerelease package versions + session.run( + "python", "-c", "import google.protobuf; print(google.protobuf.__version__)" + ) + session.run("python", "-c", "import grpc; print(grpc.__version__)") + session.run("python", "-c", "import google.auth; print(google.auth.__version__)") + + session.run( + "py.test", + "tests/unit", + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + + system_test_path = os.path.join("tests", "system.py") + system_test_folder_path = os.path.join("tests", "system") + + # Only run system tests if found. 
+ if os.path.exists(system_test_path): + session.run( + "py.test", + "--verbose", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + if os.path.exists(system_test_folder_path): + session.run( + "py.test", + "--verbose", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + + +@nox.session(python="3.10") +def generate_sync(session): + """ + Re-generate sync files for the library from CrossSync-annotated async source + """ + session.install(BLACK_VERSION) + session.install("autoflake") + session.run("python", ".cross_sync/generate.py", ".") diff --git a/.librarian/generator-input/setup.py b/.librarian/generator-input/setup.py new file mode 100644 index 000000000..fd8062970 --- /dev/null +++ b/.librarian/generator-input/setup.py @@ -0,0 +1,100 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +import setuptools + + +package_root = os.path.abspath(os.path.dirname(__file__)) + +# Package metadata. + +name = "google-cloud-bigtable" +description = "Google Cloud Bigtable API client library" + +version = {} +with open(os.path.join(package_root, "google/cloud/bigtable/gapic_version.py")) as fp: + exec(fp.read(), version) +version = version["__version__"] + + +# Should be one of: +# 'Development Status :: 3 - Alpha' +# 'Development Status :: 4 - Beta' +# 'Development Status :: 5 - Production/Stable' +release_status = "Development Status :: 5 - Production/Stable" +dependencies = [ + "google-api-core[grpc] >= 2.17.0, <3.0.0", + "google-cloud-core >= 1.4.4, <3.0.0", + "google-auth >= 2.23.0, <3.0.0,!=2.24.0,!=2.25.0", + "grpc-google-iam-v1 >= 0.12.4, <1.0.0", + "proto-plus >= 1.22.3, <2.0.0", + "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'", + "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "google-crc32c>=1.5.0, <2.0.0dev", +] +extras = {"libcst": "libcst >= 0.2.5"} + + +# Setup boilerplate below this line. + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +# Only include packages under the 'google' namespace. Do not include tests, +# benchmarks, etc. 
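+# find_namespace_packages() (rather than find_packages()) is what allows the
+# implicit `google` namespace package to be discovered without __init__.py
+# files in the namespace directories.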
+packages = [ + package + for package in setuptools.find_namespace_packages() + if package.startswith("google") +] + +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url="https://github.com/googleapis/python-bigtable", + classifiers=[ + release_status, + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Operating System :: OS Independent", + "Topic :: Internet", + ], + platforms="Posix; MacOS X; Windows", + packages=packages, + install_requires=dependencies, + extras_require=extras, + python_requires=">=3.7", + include_package_data=True, + zip_safe=False, +) diff --git a/.librarian/state.yaml b/.librarian/state.yaml new file mode 100644 index 000000000..71d0e465d --- /dev/null +++ b/.librarian/state.yaml @@ -0,0 +1,40 @@ +image: us-central1-docker.pkg.dev/cloud-sdk-librarian-prod/images-prod/python-librarian-generator@sha256:b8058df4c45e9a6e07f6b4d65b458d0d059241dd34c814f151c8bf6b89211209 +libraries: + - id: google-cloud-bigtable + version: 2.35.0 + last_generated_commit: 9637e50bc0ff6a5e8944980aaf6a2b7f34a90910 + apis: + - path: google/bigtable/v2 + service_config: bigtable_v2.yaml + - path: google/bigtable/admin/v2 + service_config: bigtableadmin_v2.yaml + source_roots: + - . + preserve_regex: [] + remove_regex: + - ^.pre-commit-config.yaml + - ^.repo-metadata.json + - ^.trampolinerc + - ^docs/admin_client/bigtable + - ^docs/admin_client/services_.rst + - ^docs/admin_client/types_.rst + - ^docs/summary_overview.md + - ^google/cloud/bigtable_v2 + - ^google/cloud/bigtable_admin/ + - ^google/cloud/bigtable_admin_v2/services + - ^google/cloud/bigtable_admin_v2/types + - ^google/cloud/bigtable_admin_v2/__init__.py + - ^google/cloud/bigtable_admin_v2/gapic + - ^google/cloud/bigtable_admin_v2/py.typed + - ^samples/AUTHORING_GUIDE.md + - ^samples/CONTRIBUTING.md + - ^samples/generated_samples + - ^tests/unit/gapic + - ^noxfile.py + - ^scripts/fixup_bigtable + - ^setup.py + - ^SECURITY.md + - ^tests/__init__.py + - ^tests/unit/__init__.py + - ^tests/unit/gapic + tag_format: v{version} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 19409cbd3..1d74695f7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -22,7 +22,7 @@ repos: - id: end-of-file-fixer - id: check-yaml - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 23.7.0 hooks: - id: black - repo: https://github.com/pycqa/flake8 diff --git a/.release-please-manifest.json b/.release-please-manifest.json deleted file mode 100644 index 5be20145a..000000000 --- a/.release-please-manifest.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - ".": "2.21.0" -} \ No newline at end of file diff --git a/.repo-metadata.json b/.repo-metadata.json index 3c65ac669..9de4b5f92 100644 --- a/.repo-metadata.json +++ b/.repo-metadata.json @@ -75,6 +75,6 @@ } ], "default_version": "v2", - "codeowner_team": "@googleapis/api-bigtable", + "codeowner_team": "@googleapis/api-bigtable @googleapis/api-bigtable-partners", "api_shortname": "bigtable" } diff --git a/.trampolinerc b/.trampolinerc index a7dfeb42c..008015237 100644 --- a/.trampolinerc +++ b/.trampolinerc @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/CHANGELOG.md b/CHANGELOG.md index 1a2a6ad3a..cbb707694 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,222 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.35.0](https://github.com/googleapis/python-bigtable/compare/v2.34.0...v2.35.0) (2025-12-16) + + +### Features + +* support mTLS certificates when available (#1249) ([ca20219cf45305de25dfb715f69dd63bce9981b7](https://github.com/googleapis/python-bigtable/commit/ca20219cf45305de25dfb715f69dd63bce9981b7)) +* add basic interceptor to client (#1206) ([6561cfac605ba7c5b3f750c3bdca9108e517ba77](https://github.com/googleapis/python-bigtable/commit/6561cfac605ba7c5b3f750c3bdca9108e517ba77)) +* add PeerInfo proto in Bigtable API ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350)) +* Add Type API updates needed to support structured keys in materialized views ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350)) +* Add encodings for STRUCT and the Timestamp type ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350)) + + +### Bug Fixes + +* async client uses fixed grace period (#1236) ([544db1cd7af876298b8637f495b6c7b2a0bcf16c](https://github.com/googleapis/python-bigtable/commit/544db1cd7af876298b8637f495b6c7b2a0bcf16c)) +* re-export AddToCell for consistency (#1241) ([2a5baf11d30dc383a7b48d5f43b6cbb6160782e3](https://github.com/googleapis/python-bigtable/commit/2a5baf11d30dc383a7b48d5f43b6cbb6160782e3)) +* retry cancelled errors (#1235) ([e3fd5d8668303db4ed35e9bf6be48b46954f9d67](https://github.com/googleapis/python-bigtable/commit/e3fd5d8668303db4ed35e9bf6be48b46954f9d67)) +* Add ReadRows/SampleRowKeys bindings for materialized views ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350)) +* Deprecate credentials_file argument ([72dfdc440c22db0f4c372e6f11a9f7dc83fed350](https://github.com/googleapis/python-bigtable/commit/72dfdc440c22db0f4c372e6f11a9f7dc83fed350)) + +## [2.34.0](https://github.com/googleapis/python-bigtable/compare/v2.33.0...v2.34.0) (2025-10-16) + + +### Features + +* Add support for Python 3.14 ([#1217](https://github.com/googleapis/python-bigtable/issues/1217)) 
([263332a](https://github.com/googleapis/python-bigtable/commit/263332af71a229cb4fa598008a708137086a6f67)) + +## [2.33.0](https://github.com/googleapis/python-bigtable/compare/v2.32.0...v2.33.0) (2025-10-06) + + +### Features + +* Add support for Proto and Enum types ([#1202](https://github.com/googleapis/python-bigtable/issues/1202)) ([34ceb86](https://github.com/googleapis/python-bigtable/commit/34ceb86007db08d453fa25cca4968d5b498ffcd6)) +* Expose universe_domain for tpc ([#1150](https://github.com/googleapis/python-bigtable/issues/1150)) ([451fd97](https://github.com/googleapis/python-bigtable/commit/451fd97e435218ffed47d39423680ffc4feccac4)) + + +### Bug Fixes + +* Fix instance registration cleanup on early iterator termination ([#1216](https://github.com/googleapis/python-bigtable/issues/1216)) ([bbfd746](https://github.com/googleapis/python-bigtable/commit/bbfd746c61a6362efa42c7899ec3e34ceb541c83)) +* Refactor channel refresh ([#1174](https://github.com/googleapis/python-bigtable/issues/1174)) ([6fa3008](https://github.com/googleapis/python-bigtable/commit/6fa30084058bc34d4487d1fee5c87d7795ff167a)) + +## [2.32.0](https://github.com/googleapis/python-bigtable/compare/v2.31.0...v2.32.0) (2025-08-01) + + +### Features + +* Add Idempotency to Cloud Bigtable MutateRowsRequest API ([#1143](https://github.com/googleapis/python-bigtable/issues/1143)) ([c3e3eb0](https://github.com/googleapis/python-bigtable/commit/c3e3eb0e4ce44ece72b150dc5822846627074fba)) +* Add support for AddToCell in Data Client ([#1147](https://github.com/googleapis/python-bigtable/issues/1147)) ([1a5b4b5](https://github.com/googleapis/python-bigtable/commit/1a5b4b514cadae5c83d61296314285d3774992c5)) +* Implement SQL support in test proxy ([#1106](https://github.com/googleapis/python-bigtable/issues/1106)) ([7a91bbf](https://github.com/googleapis/python-bigtable/commit/7a91bbfb9df23f7e93c40b88648840342af6f16f)) +* Modernized Bigtable Admin Client featuring selective GAPIC generation ([#1177](https://github.com/googleapis/python-bigtable/issues/1177)) ([58e7d37](https://github.com/googleapis/python-bigtable/commit/58e7d3782df6b13a42af053263afc575222a6b83)) + +## [2.31.0](https://github.com/googleapis/python-bigtable/compare/v2.30.1...v2.31.0) (2025-05-22) + + +### Features + +* Add deletion_protection support for LVs ([#1108](https://github.com/googleapis/python-bigtable/issues/1108)) ([c6d384d](https://github.com/googleapis/python-bigtable/commit/c6d384d4a104c182326e22dc3f10b7b905780dee)) +* Support authorized views ([#1034](https://github.com/googleapis/python-bigtable/issues/1034)) ([97a0198](https://github.com/googleapis/python-bigtable/commit/97a019833d82e617769c56761aa5548d3ab896b9)) +* Throw better error on invalid metadata response ([#1107](https://github.com/googleapis/python-bigtable/issues/1107)) ([2642317](https://github.com/googleapis/python-bigtable/commit/2642317077b723ca8fd62aa86322b524868c2c4d)) + + +### Bug Fixes + +* Re-add py-typed file for bigtable package ([#1085](https://github.com/googleapis/python-bigtable/issues/1085)) ([0c322c7](https://github.com/googleapis/python-bigtable/commit/0c322c79ecbe4cde3e79d8e83ac655a978d07877)) + +## [2.30.1](https://github.com/googleapis/python-bigtable/compare/v2.30.0...v2.30.1) (2025-04-17) + + +### Bug Fixes + +* Populate SQL app_profile_id header even when it is unset ([#1109](https://github.com/googleapis/python-bigtable/issues/1109)) ([17b75bd](https://github.com/googleapis/python-bigtable/commit/17b75bd746cb0a616f64a05eb0ed72b46de28a17)) + +## 
[2.30.0](https://github.com/googleapis/python-bigtable/compare/v2.29.0...v2.30.0) (2025-03-18) + + +### Features + +* Update ExecuteQuery to use Prepare ([#1100](https://github.com/googleapis/python-bigtable/issues/1100)) ([8a7abc1](https://github.com/googleapis/python-bigtable/commit/8a7abc1e9c34a9122b2d648e8a358a7097ed3a5d)) + + +### Bug Fixes + +* Allow protobuf 6.x ([#1092](https://github.com/googleapis/python-bigtable/issues/1092)) ([1015fa8](https://github.com/googleapis/python-bigtable/commit/1015fa83c505487f09820e3a37f76690bd00ab5d)) +* Remove setup.cfg configuration for creating universal wheels ([#1097](https://github.com/googleapis/python-bigtable/issues/1097)) ([95f4b82](https://github.com/googleapis/python-bigtable/commit/95f4b8233cba2a18633e64c5e0bc177e23767a83)) + +## [2.29.0](https://github.com/googleapis/python-bigtable/compare/v2.28.1...v2.29.0) (2025-02-26) + + +### Features + +* Add support for array and float32 SQL query params ([#1078](https://github.com/googleapis/python-bigtable/issues/1078)) ([89b8da8](https://github.com/googleapis/python-bigtable/commit/89b8da8a445aeb08854d9fa77cbc0e4fc042c87f)) + + +### Bug Fixes + +* Grpc channel refresh ([#1087](https://github.com/googleapis/python-bigtable/issues/1087)) ([f44b36b](https://github.com/googleapis/python-bigtable/commit/f44b36bf51e3e4e3b8a774f96e682d3f1f8d4b16)) + +## [2.28.1](https://github.com/googleapis/python-bigtable/compare/v2.28.0...v2.28.1) (2025-01-17) + + +### Bug Fixes + +* Allow empty headers for btql routing ([#1072](https://github.com/googleapis/python-bigtable/issues/1072)) ([e7ecfeb](https://github.com/googleapis/python-bigtable/commit/e7ecfeb8984a45c880d9483305964fff347eb4b8)) + +## [2.28.0](https://github.com/googleapis/python-bigtable/compare/v2.27.0...v2.28.0) (2025-01-08) + + +### Features + +* Add generated sync client ([#1017](https://github.com/googleapis/python-bigtable/issues/1017)) ([f974823](https://github.com/googleapis/python-bigtable/commit/f974823bf8a74c2f8b1bc69997b13bc1acaf8bef)) + +## [2.27.0](https://github.com/googleapis/python-bigtable/compare/v2.26.0...v2.27.0) (2024-11-12) + + +### Features + +* Add support for Cloud Bigtable Node Scaling Factor for CBT Clusters ([#1023](https://github.com/googleapis/python-bigtable/issues/1023)) ([0809c6a](https://github.com/googleapis/python-bigtable/commit/0809c6ac274e909103ad160a8bcab95f8bb46f31)) +* Surface `retry` param to `Table.read_row` api ([#982](https://github.com/googleapis/python-bigtable/issues/982)) ([a8286d2](https://github.com/googleapis/python-bigtable/commit/a8286d2a510f654f9c270c3c761c02e4ab3817d4)) + + +### Bug Fixes + +* Registering duplicate instance ([#1033](https://github.com/googleapis/python-bigtable/issues/1033)) ([2bca8fb](https://github.com/googleapis/python-bigtable/commit/2bca8fb220eeb1906fc6a3cf1f879f3d41fbbff8)) + +## [2.26.0](https://github.com/googleapis/python-bigtable/compare/v2.25.0...v2.26.0) (2024-08-12) + + +### Features + +* Add fields and the BackupType proto for Hot Backups ([#1010](https://github.com/googleapis/python-bigtable/issues/1010)) ([b95801f](https://github.com/googleapis/python-bigtable/commit/b95801ffa8081e0072232247fbc5879105c109a6)) +* Add MergeToCell to Mutation APIs ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) +* Add min, max, hll aggregators and more types ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) +* Async execute query client 
([#1011](https://github.com/googleapis/python-bigtable/issues/1011)) ([45bc8c4](https://github.com/googleapis/python-bigtable/commit/45bc8c4a0fe567ce5e0126a1a70e7eb3dca93e92)) + + +### Bug Fixes + +* Use single routing metadata header ([#1005](https://github.com/googleapis/python-bigtable/issues/1005)) ([20eeb0a](https://github.com/googleapis/python-bigtable/commit/20eeb0a68d7b44d07a6d84bc7a7e040ad63bb96d)) + + +### Documentation + +* Add clarification around SQL timestamps ([#1012](https://github.com/googleapis/python-bigtable/issues/1012)) ([6e80190](https://github.com/googleapis/python-bigtable/commit/6e801900bbe9385d3b579b8c3327c87c3617d92f)) +* Corrected various type documentation ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) + +## [2.25.0](https://github.com/googleapis/python-bigtable/compare/v2.24.0...v2.25.0) (2024-07-18) + + +### Features + +* Publish ProtoRows Message ([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a)) +* Publish the Cloud Bigtable ExecuteQuery API ([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a)) + + +### Bug Fixes + +* Allow protobuf 5.x ([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a)) + +## [2.24.0](https://github.com/googleapis/python-bigtable/compare/v2.23.1...v2.24.0) (2024-06-11) + + +### Features + +* Add String type with Utf8Raw encoding to Bigtable API ([#968](https://github.com/googleapis/python-bigtable/issues/968)) ([2a2bbfd](https://github.com/googleapis/python-bigtable/commit/2a2bbfdba6737c508ab1073d37fef680ca2a8c2f)) +* Improve async sharding ([#977](https://github.com/googleapis/python-bigtable/issues/977)) ([fd1f7da](https://github.com/googleapis/python-bigtable/commit/fd1f7dafd38f7f0e714a3384a27176f485523682)) + + +### Bug Fixes + +* **backup:** Backup name regex ([#970](https://github.com/googleapis/python-bigtable/issues/970)) ([6ef122a](https://github.com/googleapis/python-bigtable/commit/6ef122ad49f43e3a22cde5cb6fdaefd947670136)) +* Improve rowset revision ([#979](https://github.com/googleapis/python-bigtable/issues/979)) ([da27527](https://github.com/googleapis/python-bigtable/commit/da275279a7e619e4cd3e72b10ac629d6e0e1fe47)) + +## [2.23.1](https://github.com/googleapis/python-bigtable/compare/v2.23.0...v2.23.1) (2024-04-15) + + +### Bug Fixes + +* Use insecure grpc channel with emulator ([#946](https://github.com/googleapis/python-bigtable/issues/946)) ([aa31706](https://github.com/googleapis/python-bigtable/commit/aa3170663f9bd09d70c99d4e76c07f7f293ad935)) + +## [2.23.0](https://github.com/googleapis/python-bigtable/compare/v2.22.0...v2.23.0) (2024-02-07) + + +### Features + +* Add async data client preview ([7088e39](https://github.com/googleapis/python-bigtable/commit/7088e39c6bac10e5f830e8fa68e181412910ec5a)) +* Adding feature flags for routing cookie and retry info ([#905](https://github.com/googleapis/python-bigtable/issues/905)) ([1859e67](https://github.com/googleapis/python-bigtable/commit/1859e67961629663a8749eea849b5b005fcbc09f)) + + +### Bug Fixes + +* Fix `ValueError` in `test__validate_universe_domain` ([#929](https://github.com/googleapis/python-bigtable/issues/929)) ([aa76a5a](https://github.com/googleapis/python-bigtable/commit/aa76a5aaa349386d5972d96e1255389e30df8764)) + +## [2.22.0](https://github.com/googleapis/python-bigtable/compare/v2.21.0...v2.22.0) (2023-12-12) + + +### Features + +* Add 
support for Cloud Bigtable Request Priorities in App Profiles ([#871](https://github.com/googleapis/python-bigtable/issues/871)) ([a4d551e](https://github.com/googleapis/python-bigtable/commit/a4d551e34006202ee96a395a2107d7acdc5881de)) +* Add support for Python 3.12 ([#888](https://github.com/googleapis/python-bigtable/issues/888)) ([4f050aa](https://github.com/googleapis/python-bigtable/commit/4f050aa5aed9a9dcf209779d5c10e5de8e2ff19e)) +* Introduce compatibility with native namespace packages ([#893](https://github.com/googleapis/python-bigtable/issues/893)) ([d218f4e](https://github.com/googleapis/python-bigtable/commit/d218f4ebd4ed6705721dca9318df955b40b0d0ac)) +* Publish CopyBackup protos to external customers ([#855](https://github.com/googleapis/python-bigtable/issues/855)) ([4105df7](https://github.com/googleapis/python-bigtable/commit/4105df762f1318c49bba030063897f0c50e4daee)) + + +### Bug Fixes + +* Add feature flag for improved mutate rows throttling ([e5af359](https://github.com/googleapis/python-bigtable/commit/e5af3597f45fc4c094c59abca876374f5a866c1b)) +* Add lock to flow control ([#899](https://github.com/googleapis/python-bigtable/issues/899)) ([e4e63c7](https://github.com/googleapis/python-bigtable/commit/e4e63c7b5b91273b3aae04fda59cc5a21c848de2)) +* Mutations batcher race condition ([#896](https://github.com/googleapis/python-bigtable/issues/896)) ([fe58f61](https://github.com/googleapis/python-bigtable/commit/fe58f617c7364d7e99e2ec50abd5f080852bf033)) +* Require google-cloud-core 1.4.4 ([#866](https://github.com/googleapis/python-bigtable/issues/866)) ([09f8a46](https://github.com/googleapis/python-bigtable/commit/09f8a4667d8b68a9f2048ba1aa57db4f775a2c03)) +* Use `retry_async` instead of `retry` in async client ([597efd1](https://github.com/googleapis/python-bigtable/commit/597efd11d15f20549010b4301be4d9768326e6a2)) + + +### Documentation + +* Minor formatting ([e5af359](https://github.com/googleapis/python-bigtable/commit/e5af3597f45fc4c094c59abca876374f5a866c1b)) + ## [2.21.0](https://github.com/googleapis/python-bigtable/compare/v2.20.0...v2.21.0) (2023-08-02) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 504fb3742..07ac8f218 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.7, 3.8, 3.9, 3.10 and 3.11 on both UNIX and Windows. + 3.7, 3.8, 3.9, 3.10, 3.11, 3.12, 3.13 and 3.14 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.11 -- -k + $ nox -s unit-3.14 -- -k .. note:: @@ -143,12 +143,12 @@ Running System Tests $ nox -s system # Run a single system test - $ nox -s system-3.8 -- -k + $ nox -s system-3.9 -- -k .. note:: - System tests are only configured to run under Python 3.8. + System tests are only configured to run under Python 3.9. For expediency, we do not run them in older versions of Python 3. This alone will not run the tests. You'll need to change some local @@ -226,12 +226,18 @@ We support: - `Python 3.9`_ - `Python 3.10`_ - `Python 3.11`_ +- `Python 3.12`_ +- `Python 3.13`_ +- `Python 3.14`_ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ .. _Python 3.9: https://docs.python.org/3.9/ .. _Python 3.10: https://docs.python.org/3.10/ .. 
_Python 3.11: https://docs.python.org/3.11/ +.. _Python 3.12: https://docs.python.org/3.12/ +.. _Python 3.13: https://docs.python.org/3.13/ +.. _Python 3.14: https://docs.python.org/3.14/ Supported versions can be found in our ``noxfile.py`` `config`_. diff --git a/MANIFEST.in b/MANIFEST.in index e0a667053..d6814cd60 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/README.rst b/README.rst index 5f7d5809d..823b52c88 100644 --- a/README.rst +++ b/README.rst @@ -1,3 +1,7 @@ +:**NOTE**: **This github repository is archived. The repository contents and history have moved to** `google-cloud-python`_. + +.. _google-cloud-python: https://github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-bigtable + Python Client for Google Cloud Bigtable ======================================= @@ -20,6 +24,30 @@ Analytics, Maps, and Gmail. .. _Client Library Documentation: https://googleapis.dev/python/bigtable/latest .. _Product Documentation: https://cloud.google.com/bigtable/docs + +Async Data Client +------------------------- + +:code:`v2.23.0` includes a release of the new :code:`BigtableDataClientAsync` client, accessible at the import path +:code:`google.cloud.bigtable.data`. + +The new client brings a simplified API and increased performance using asyncio. +The new client is focused on the data API (i.e. reading and writing Bigtable data), with admin operations +remaining exclusively in the existing synchronous client. + +Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com, +or through the Github `issue tracker`_. + + + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + + +.. _issue tracker: https://github.com/googleapis/python-bigtable/issues + + Quick Start ----------- @@ -94,14 +122,3 @@ Next Steps to see other available methods on the client. - Read the `Product documentation`_ to learn more about the product and see How-to Guides. - -``google-cloud-happybase`` --------------------------- - -In addition to the core ``google-cloud-bigtable``, we provide a -`google-cloud-happybase -`__ library -with the same interface as the popular `HappyBase -`__ library. Unlike HappyBase, -``google-cloud-happybase`` uses ``google-cloud-bigtable`` under the covers, -rather than Apache HBase. diff --git a/docs/admin_client/admin_client_usage.rst b/docs/admin_client/admin_client_usage.rst new file mode 100644 index 000000000..8c6f4a5dc --- /dev/null +++ b/docs/admin_client/admin_client_usage.rst @@ -0,0 +1,11 @@ +Admin Client +============ +.. toctree:: + :maxdepth: 2 + + services_ + types_ + +.. + This should be the only handwritten RST file in this directory. + Everything else should be autogenerated. diff --git a/docs/admin_client/bigtable_instance_admin.rst b/docs/admin_client/bigtable_instance_admin.rst new file mode 100644 index 000000000..42f7caad7 --- /dev/null +++ b/docs/admin_client/bigtable_instance_admin.rst @@ -0,0 +1,10 @@ +BigtableInstanceAdmin +--------------------------------------- + +.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin + :members: + :inherited-members: + +.. 
automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers + :members: + :inherited-members: diff --git a/docs/admin_client/bigtable_table_admin.rst b/docs/admin_client/bigtable_table_admin.rst new file mode 100644 index 000000000..0fa4b276a --- /dev/null +++ b/docs/admin_client/bigtable_table_admin.rst @@ -0,0 +1,10 @@ +BigtableTableAdmin +------------------------------------ + +.. automodule:: google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin + :members: + :inherited-members: + +.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers + :members: + :inherited-members: diff --git a/docs/admin_client/services_.rst b/docs/admin_client/services_.rst new file mode 100644 index 000000000..ea55c7da1 --- /dev/null +++ b/docs/admin_client/services_.rst @@ -0,0 +1,7 @@ +Services for Google Cloud Bigtable Admin v2 API +=============================================== +.. toctree:: + :maxdepth: 2 + + bigtable_instance_admin + bigtable_table_admin diff --git a/docs/admin_client/types_.rst b/docs/admin_client/types_.rst new file mode 100644 index 000000000..ef32b9684 --- /dev/null +++ b/docs/admin_client/types_.rst @@ -0,0 +1,10 @@ +Types for Google Cloud Bigtable Admin v2 API +============================================ + +.. automodule:: google.cloud.bigtable_admin_v2.types + :members: + :show-inheritance: + +.. automodule:: google.cloud.bigtable_admin_v2.overlay.types + :members: + :show-inheritance: diff --git a/docs/app-profile.rst b/docs/classic_client/app-profile.rst similarity index 100% rename from docs/app-profile.rst rename to docs/classic_client/app-profile.rst diff --git a/docs/backup.rst b/docs/classic_client/backup.rst similarity index 100% rename from docs/backup.rst rename to docs/classic_client/backup.rst diff --git a/docs/batcher.rst b/docs/classic_client/batcher.rst similarity index 100% rename from docs/batcher.rst rename to docs/classic_client/batcher.rst diff --git a/docs/client-intro.rst b/docs/classic_client/client-intro.rst similarity index 100% rename from docs/client-intro.rst rename to docs/classic_client/client-intro.rst diff --git a/docs/client.rst b/docs/classic_client/client.rst similarity index 100% rename from docs/client.rst rename to docs/classic_client/client.rst diff --git a/docs/cluster.rst b/docs/classic_client/cluster.rst similarity index 100% rename from docs/cluster.rst rename to docs/classic_client/cluster.rst diff --git a/docs/column-family.rst b/docs/classic_client/column-family.rst similarity index 100% rename from docs/column-family.rst rename to docs/classic_client/column-family.rst diff --git a/docs/data-api.rst b/docs/classic_client/data-api.rst similarity index 97% rename from docs/data-api.rst rename to docs/classic_client/data-api.rst index 01a49178f..9b50e9ec9 100644 --- a/docs/data-api.rst +++ b/docs/classic_client/data-api.rst @@ -1,6 +1,13 @@ Data API ======== +.. note:: + This page describes how to use the Data API with the synchronous Bigtable client. + Examples for using the Data API with the async client can be found in the + `Getting Started Guide`_. + +.. _Getting Started Guide: https://cloud.google.com/bigtable/docs/samples-python-hello + After creating a :class:`Table ` and some column families, you are ready to store and retrieve data. 
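To make the note above concrete, here is a minimal sketch of the async read path described in the Getting Started Guide. It assumes default application credentials and an existing instance and table; the project, instance, table, and row-key identifiers are placeholders, and the exact client surface should be confirmed against the published `BigtableDataClientAsync` reference.

```python
import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync


async def main():
    # Placeholder resource names -- substitute your own project/instance/table.
    async with BigtableDataClientAsync(project="my-project") as client:
        table = client.get_table("my-instance", "my-table")
        row = await table.read_row(b"my-row-key")
        if row is not None:
            for cell in row.cells:
                print(cell.family, cell.qualifier, cell.value)


asyncio.run(main())
```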
diff --git a/docs/encryption-info.rst b/docs/classic_client/encryption-info.rst similarity index 100% rename from docs/encryption-info.rst rename to docs/classic_client/encryption-info.rst diff --git a/docs/instance-api.rst b/docs/classic_client/instance-api.rst similarity index 100% rename from docs/instance-api.rst rename to docs/classic_client/instance-api.rst diff --git a/docs/instance.rst b/docs/classic_client/instance.rst similarity index 100% rename from docs/instance.rst rename to docs/classic_client/instance.rst diff --git a/docs/row-data.rst b/docs/classic_client/row-data.rst similarity index 100% rename from docs/row-data.rst rename to docs/classic_client/row-data.rst diff --git a/docs/row-filters.rst b/docs/classic_client/row-filters.rst similarity index 100% rename from docs/row-filters.rst rename to docs/classic_client/row-filters.rst diff --git a/docs/row-set.rst b/docs/classic_client/row-set.rst similarity index 100% rename from docs/row-set.rst rename to docs/classic_client/row-set.rst diff --git a/docs/row.rst b/docs/classic_client/row.rst similarity index 100% rename from docs/row.rst rename to docs/classic_client/row.rst diff --git a/docs/snippets.py b/docs/classic_client/snippets.py similarity index 99% rename from docs/snippets.py rename to docs/classic_client/snippets.py index 1d93fdf12..c6059409d 100644 --- a/docs/snippets.py +++ b/docs/classic_client/snippets.py @@ -29,7 +29,7 @@ """ -import datetime +from datetime import datetime, timezone import pytest from google.api_core.exceptions import DeadlineExceeded @@ -39,7 +39,7 @@ from test_utils.system import unique_resource_id from test_utils.retry import RetryErrors -from google.cloud._helpers import UTC + from google.cloud.bigtable import Client from google.cloud.bigtable import enums @@ -57,8 +57,8 @@ STORAGE_TYPE = enums.StorageType.SSD LABEL_KEY = "python-snippet" LABEL_STAMP = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) + datetime.now(timezone.utc) + .replace(microsecond=0) .strftime("%Y-%m-%dt%H-%M-%S") ) LABELS = {LABEL_KEY: str(LABEL_STAMP)} @@ -448,7 +448,6 @@ def test_bigtable_create_table(): def test_bigtable_list_tables(): - # [START bigtable_api_list_tables] from google.cloud.bigtable import Client diff --git a/docs/snippets_table.py b/docs/classic_client/snippets_table.py similarity index 99% rename from docs/snippets_table.py rename to docs/classic_client/snippets_table.py index f27260425..1850e836b 100644 --- a/docs/snippets_table.py +++ b/docs/classic_client/snippets_table.py @@ -29,7 +29,7 @@ """ -import datetime +from datetime import datetime, timezone import pytest from google.api_core.exceptions import TooManyRequests @@ -37,7 +37,6 @@ from test_utils.system import unique_resource_id from test_utils.retry import RetryErrors -from google.cloud._helpers import UTC from google.cloud.bigtable import Client from google.cloud.bigtable import enums from google.cloud.bigtable import column_family @@ -54,8 +53,8 @@ STORAGE_TYPE = enums.StorageType.SSD LABEL_KEY = "python-snippet" LABEL_STAMP = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) + datetime.now(timezone.utc) + .replace(microsecond=0) .strftime("%Y-%m-%dt%H-%M-%S") ) LABELS = {LABEL_KEY: str(LABEL_STAMP)} @@ -179,7 +178,7 @@ def test_bigtable_write_read_drop_truncate(): value = "value_{}".format(i).encode() row = table.row(row_key) row.set_cell( - COLUMN_FAMILY_ID, col_name, value, timestamp=datetime.datetime.utcnow() + COLUMN_FAMILY_ID, col_name, value, timestamp=datetime.now(timezone.utc) ) 
rows.append(row) response = table.mutate_rows(rows) @@ -270,7 +269,7 @@ def test_bigtable_mutations_batcher(): row_key = row_keys[0] row = table.row(row_key) row.set_cell( - COLUMN_FAMILY_ID, column_name, "value-0", timestamp=datetime.datetime.utcnow() + COLUMN_FAMILY_ID, column_name, "value-0", timestamp=datetime.now(timezone.utc) ) batcher.mutate(row) # Add a collections of rows @@ -279,7 +278,7 @@ def test_bigtable_mutations_batcher(): row = table.row(row_keys[i]) value = "value_{}".format(i).encode() row.set_cell( - COLUMN_FAMILY_ID, column_name, value, timestamp=datetime.datetime.utcnow() + COLUMN_FAMILY_ID, column_name, value, timestamp=datetime.now(timezone.utc) ) rows.append(row) batcher.mutate_rows(rows) @@ -759,7 +758,7 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): row_key = b"row_key_1" row = table.row(row_key) row.set_cell( - COLUMN_FAMILY_ID, COL_NAME1, "value-0", timestamp=datetime.datetime.utcnow() + COLUMN_FAMILY_ID, COL_NAME1, "value-0", timestamp=datetime.now(timezone.utc) ) # In batcher, mutate will flush current batch if it @@ -964,16 +963,15 @@ def test_bigtable_create_family_gc_nested(): def test_bigtable_row_data_cells_cell_value_cell_values(): - value = b"value_in_col1" row = Config.TABLE.row(b"row_key_1") row.set_cell( - COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow() + COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.now(timezone.utc) ) row.commit() row.set_cell( - COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow() + COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.now(timezone.utc) ) row.commit() @@ -1051,7 +1049,7 @@ def test_bigtable_row_setcell_rowkey(): cell_val = b"cell-val" row.set_cell( - COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.datetime.utcnow() + COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.now(timezone.utc) ) # [END bigtable_api_row_set_cell] diff --git a/docs/table-api.rst b/docs/classic_client/table-api.rst similarity index 100% rename from docs/table-api.rst rename to docs/classic_client/table-api.rst diff --git a/docs/table.rst b/docs/classic_client/table.rst similarity index 100% rename from docs/table.rst rename to docs/classic_client/table.rst diff --git a/docs/usage.rst b/docs/classic_client/usage.rst similarity index 91% rename from docs/usage.rst rename to docs/classic_client/usage.rst index 73a32b039..7a47f4d4a 100644 --- a/docs/usage.rst +++ b/docs/classic_client/usage.rst @@ -1,10 +1,15 @@ -Using the API -============= +Classic Client +============== .. toctree:: :maxdepth: 2 client-intro + + instance-api + table-api + data-api + client cluster instance diff --git a/docs/conf.py b/docs/conf.py index b5a870f58..d8f0352cd 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/docs/data_client/async_data_authorized_view.rst b/docs/data_client/async_data_authorized_view.rst new file mode 100644 index 000000000..7d7312970 --- /dev/null +++ b/docs/data_client/async_data_authorized_view.rst @@ -0,0 +1,11 @@ +Authorized View Async +~~~~~~~~~~~~~~~~~~~~~ + + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + +.. 
autoclass:: google.cloud.bigtable.data._async.client.AuthorizedViewAsync + :members: + :inherited-members: diff --git a/docs/data_client/async_data_client.rst b/docs/data_client/async_data_client.rst new file mode 100644 index 000000000..2ddcc090c --- /dev/null +++ b/docs/data_client/async_data_client.rst @@ -0,0 +1,12 @@ +Bigtable Data Client Async +~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + + +.. autoclass:: google.cloud.bigtable.data.BigtableDataClientAsync + :members: + :show-inheritance: diff --git a/docs/data_client/async_data_execute_query_iterator.rst b/docs/data_client/async_data_execute_query_iterator.rst new file mode 100644 index 000000000..b911fab7f --- /dev/null +++ b/docs/data_client/async_data_execute_query_iterator.rst @@ -0,0 +1,6 @@ +Execute Query Iterator Async +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: google.cloud.bigtable.data.execute_query.ExecuteQueryIteratorAsync + :members: + :show-inheritance: diff --git a/docs/data_client/async_data_mutations_batcher.rst b/docs/data_client/async_data_mutations_batcher.rst new file mode 100644 index 000000000..3e81f885a --- /dev/null +++ b/docs/data_client/async_data_mutations_batcher.rst @@ -0,0 +1,6 @@ +Mutations Batcher Async +~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data._async.mutations_batcher + :members: + :show-inheritance: diff --git a/docs/data_client/async_data_table.rst b/docs/data_client/async_data_table.rst new file mode 100644 index 000000000..37c396570 --- /dev/null +++ b/docs/data_client/async_data_table.rst @@ -0,0 +1,11 @@ +Table Async +~~~~~~~~~~~ + + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + +.. autoclass:: google.cloud.bigtable.data._async.client.TableAsync + :members: + :inherited-members: diff --git a/docs/data_client/common_data_exceptions.rst b/docs/data_client/common_data_exceptions.rst new file mode 100644 index 000000000..6180ef222 --- /dev/null +++ b/docs/data_client/common_data_exceptions.rst @@ -0,0 +1,6 @@ +Custom Exceptions +~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.exceptions + :members: + :show-inheritance: diff --git a/docs/data_client/common_data_execute_query_metadata.rst b/docs/data_client/common_data_execute_query_metadata.rst new file mode 100644 index 000000000..69add630d --- /dev/null +++ b/docs/data_client/common_data_execute_query_metadata.rst @@ -0,0 +1,6 @@ +Execute Query Metadata +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.execute_query.metadata + :members: + :show-inheritance: diff --git a/docs/data_client/common_data_execute_query_values.rst b/docs/data_client/common_data_execute_query_values.rst new file mode 100644 index 000000000..6c4fb71c1 --- /dev/null +++ b/docs/data_client/common_data_execute_query_values.rst @@ -0,0 +1,6 @@ +Execute Query Values +~~~~~~~~~~~~~~~~~~~~ + +.. 
automodule:: google.cloud.bigtable.data.execute_query.values + :members: + :show-inheritance: diff --git a/docs/data_client/common_data_mutations.rst b/docs/data_client/common_data_mutations.rst new file mode 100644 index 000000000..9d7a9eab2 --- /dev/null +++ b/docs/data_client/common_data_mutations.rst @@ -0,0 +1,6 @@ +Mutations +~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.mutations + :members: + :show-inheritance: diff --git a/docs/data_client/common_data_read_modify_write_rules.rst b/docs/data_client/common_data_read_modify_write_rules.rst new file mode 100644 index 000000000..2f28ddf3f --- /dev/null +++ b/docs/data_client/common_data_read_modify_write_rules.rst @@ -0,0 +1,6 @@ +Read Modify Write Rules +~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.read_modify_write_rules + :members: + :show-inheritance: diff --git a/docs/data_client/common_data_read_rows_query.rst b/docs/data_client/common_data_read_rows_query.rst new file mode 100644 index 000000000..4e3e796d9 --- /dev/null +++ b/docs/data_client/common_data_read_rows_query.rst @@ -0,0 +1,6 @@ +Read Rows Query +~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.read_rows_query + :members: + :show-inheritance: diff --git a/docs/data_client/common_data_row.rst b/docs/data_client/common_data_row.rst new file mode 100644 index 000000000..63bc71143 --- /dev/null +++ b/docs/data_client/common_data_row.rst @@ -0,0 +1,6 @@ +Rows and Cells +~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.row + :members: + :show-inheritance: diff --git a/docs/data_client/common_data_row_filters.rst b/docs/data_client/common_data_row_filters.rst new file mode 100644 index 000000000..22bda8a26 --- /dev/null +++ b/docs/data_client/common_data_row_filters.rst @@ -0,0 +1,62 @@ +Bigtable Row Filters +==================== + +It is possible to use a +:class:`RowFilter ` +when constructing a :class:`ReadRowsQuery ` + +The following basic filters +are provided: + +* :class:`SinkFilter <.data.row_filters.SinkFilter>` +* :class:`PassAllFilter <.data.row_filters.PassAllFilter>` +* :class:`BlockAllFilter <.data.row_filters.BlockAllFilter>` +* :class:`RowKeyRegexFilter <.data.row_filters.RowKeyRegexFilter>` +* :class:`RowSampleFilter <.data.row_filters.RowSampleFilter>` +* :class:`FamilyNameRegexFilter <.data.row_filters.FamilyNameRegexFilter>` +* :class:`ColumnQualifierRegexFilter <.data.row_filters.ColumnQualifierRegexFilter>` +* :class:`TimestampRangeFilter <.data.row_filters.TimestampRangeFilter>` +* :class:`ColumnRangeFilter <.data.row_filters.ColumnRangeFilter>` +* :class:`ValueRegexFilter <.data.row_filters.ValueRegexFilter>` +* :class:`ValueRangeFilter <.data.row_filters.ValueRangeFilter>` +* :class:`CellsRowOffsetFilter <.data.row_filters.CellsRowOffsetFilter>` +* :class:`CellsRowLimitFilter <.data.row_filters.CellsRowLimitFilter>` +* :class:`CellsColumnLimitFilter <.data.row_filters.CellsColumnLimitFilter>` +* :class:`StripValueTransformerFilter <.data.row_filters.StripValueTransformerFilter>` +* :class:`ApplyLabelFilter <.data.row_filters.ApplyLabelFilter>` + +In addition, these filters can be combined into composite filters with + +* :class:`RowFilterChain <.data.row_filters.RowFilterChain>` +* :class:`RowFilterUnion <.data.row_filters.RowFilterUnion>` +* :class:`ConditionalRowFilter <.data.row_filters.ConditionalRowFilter>` + +These rules can be nested arbitrarily, with a basic filter at the lowest +level. For example: + +.. code:: python + + # Filter in a specified column (matching any column family). 
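+    # (Per the Bigtable docs, filter regexes use RE2 syntax and are matched
+    # against raw bytes, which is why the patterns below are bytes literals.)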
+ col1_filter = ColumnQualifierRegexFilter(b'columnbia') + + # Create a filter to label results. + label1 = u'label-red' + label1_filter = ApplyLabelFilter(label1) + + # Combine the filters to label all the cells in columnbia. + chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) + + # Create a similar filter to label cells blue. + col2_filter = ColumnQualifierRegexFilter(b'columnseeya') + label2 = u'label-blue' + label2_filter = ApplyLabelFilter(label2) + chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) + + # Bring our two labeled columns together. + row_filter = RowFilterUnion(filters=[chain1, chain2]) + +---- + +.. automodule:: google.cloud.bigtable.data.row_filters + :members: + :show-inheritance: diff --git a/docs/data_client/data_client_usage.rst b/docs/data_client/data_client_usage.rst new file mode 100644 index 000000000..708dafc62 --- /dev/null +++ b/docs/data_client/data_client_usage.rst @@ -0,0 +1,41 @@ +Data Client +=========== + +Sync Surface +------------ + +.. toctree:: + :maxdepth: 3 + + sync_data_client + sync_data_table + sync_data_authorized_view + sync_data_mutations_batcher + sync_data_execute_query_iterator + +Async Surface +------------- + +.. toctree:: + :maxdepth: 3 + + async_data_client + async_data_table + async_data_authorized_view + async_data_mutations_batcher + async_data_execute_query_iterator + +Common Classes +-------------- + +.. toctree:: + :maxdepth: 3 + + common_data_read_rows_query + common_data_row + common_data_row_filters + common_data_mutations + common_data_read_modify_write_rules + common_data_exceptions + common_data_execute_query_values + common_data_execute_query_metadata diff --git a/docs/data_client/sync_data_authorized_view.rst b/docs/data_client/sync_data_authorized_view.rst new file mode 100644 index 000000000..c0ac29721 --- /dev/null +++ b/docs/data_client/sync_data_authorized_view.rst @@ -0,0 +1,6 @@ +Authorized View +~~~~~~~~~~~~~~~ + +.. autoclass:: google.cloud.bigtable.data._sync_autogen.client.AuthorizedView + :members: + :inherited-members: diff --git a/docs/data_client/sync_data_client.rst b/docs/data_client/sync_data_client.rst new file mode 100644 index 000000000..cf7c00dad --- /dev/null +++ b/docs/data_client/sync_data_client.rst @@ -0,0 +1,6 @@ +Bigtable Data Client +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: google.cloud.bigtable.data.BigtableDataClient + :members: + :show-inheritance: diff --git a/docs/data_client/sync_data_execute_query_iterator.rst b/docs/data_client/sync_data_execute_query_iterator.rst new file mode 100644 index 000000000..6eb9f84db --- /dev/null +++ b/docs/data_client/sync_data_execute_query_iterator.rst @@ -0,0 +1,6 @@ +Execute Query Iterator +~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: google.cloud.bigtable.data.execute_query.ExecuteQueryIterator + :members: + :show-inheritance: diff --git a/docs/data_client/sync_data_mutations_batcher.rst b/docs/data_client/sync_data_mutations_batcher.rst new file mode 100644 index 000000000..2b7d1bfe0 --- /dev/null +++ b/docs/data_client/sync_data_mutations_batcher.rst @@ -0,0 +1,6 @@ +Mutations Batcher +~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data._sync_autogen.mutations_batcher + :members: + :show-inheritance: diff --git a/docs/data_client/sync_data_table.rst b/docs/data_client/sync_data_table.rst new file mode 100644 index 000000000..95c91eb27 --- /dev/null +++ b/docs/data_client/sync_data_table.rst @@ -0,0 +1,6 @@ +Table +~~~~~ + +.. 
autoclass:: google.cloud.bigtable.data.Table
+   :members:
+   :show-inheritance:
diff --git a/docs/index.rst b/docs/index.rst
index b1c8f0574..0694c8bb0 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -2,30 +2,26 @@
 .. include:: multiprocessing.rst
 
-Using the API
+Client Types
 -------------
 .. toctree::
-    :maxdepth: 2
-
-    usage
-
-
-API Reference
--------------
-.. toctree::
-    :maxdepth: 2
-
-    instance-api
-    table-api
-    data-api
+    :maxdepth: 3
 
+    data_client/data_client_usage
+    classic_client/usage
+    admin_client/admin_client_usage
 
 Changelog
 ---------
-For a list of all ``google-cloud-datastore`` releases:
+For a list of all ``google-cloud-bigtable`` releases:
 
 .. toctree::
     :maxdepth: 2
 
     changelog
+
+.. toctree::
+    :hidden:
+
+    summary_overview.md
diff --git a/docs/scripts/patch_devsite_toc.py b/docs/scripts/patch_devsite_toc.py
new file mode 100644
index 000000000..fbb753daf
--- /dev/null
+++ b/docs/scripts/patch_devsite_toc.py
@@ -0,0 +1,277 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This script runs after ``nox -s docfx``. docfx is the API doc format used by
+Google Cloud. It is described here: https://github.com/googleapis/docuploader?tab=readme-ov-file#requirements-for-docfx-yaml-tarballs.
+
+One of the files used by docfx is toc.yml, which is used to generate the table of contents sidebar.
+This script patches that file to create subfolders for each of the clients.
+"""
+
+
+import glob
+import yaml
+import os
+import shutil
+
+# set working directory to /docs
+os.chdir(f"{os.path.dirname(os.path.abspath(__file__))}/{os.pardir}")
+
+
+def add_sections(toc_file_path, section_list, output_file_path=None):
+    """
+    Add new sections to the autogenerated docfx table of contents file
+
+    Takes in a list of TocSection objects, each of which points to a directory
+    of rst files within the main /docs directory representing a self-contained
+    section of content
+
+    :param toc_file_path: path to the autogenerated toc file
+    :param section_list: list of TocSection objects to add
+    :param output_file_path: path to save the updated toc file.
+        If None, save to the input file
+    """
+    # remove any sections that are already in the toc
+    remove_sections(toc_file_path, [section.title for section in section_list])
+    # add new sections
+    current_toc = yaml.safe_load(open(toc_file_path, "r"))
+    for section in section_list:
+        print(f"Adding section {section.title}...")
+        current_toc[0]["items"].insert(-1, section.to_dict())
+        section.copy_markdown()
+    # save file
+    if output_file_path is None:
+        output_file_path = toc_file_path
+    with open(output_file_path, "w") as f:
+        yaml.dump(current_toc, f)
+
+
+def remove_sections(toc_file_path, section_list, output_file_path=None):
+    """
+    Remove sections from the autogenerated docfx table of contents file
+
+    Takes in a list of string section names to remove from the toc file
+
+    :param toc_file_path: path to the autogenerated toc file
+    :param section_list: list of section names to remove
+    :param output_file_path: path to save the updated toc file. If None, save to the input file
+    """
+    current_toc = yaml.safe_load(open(toc_file_path, "r"))
+    print(f"Removing sections {section_list}...")
+    new_items = [d for d in current_toc[0]["items"] if d["name"] not in section_list]
+    current_toc[0]["items"] = new_items
+    # save file
+    if output_file_path is None:
+        output_file_path = toc_file_path
+    with open(output_file_path, "w") as f:
+        yaml.dump(current_toc, f)
+
+
+class TocSection:
+    def __init__(self, dir_name, index_file_name):
+        """
+        :param dir_name: name of the directory containing the rst files
+        :param index_file_name: name of an index file within dir_name. This file
+            will not be included in the table of contents, but provides an ordered
+            list of the other files which should be included
+        """
+        self.dir_name = dir_name
+        self.index_file_name = index_file_name
+        index_file_path = os.path.join(dir_name, index_file_name)
+        # find the set of files referenced by the index file
+        with open(index_file_path, "r") as f:
+            self.title = None
+            in_toc = False
+            self.items = []
+            for line in f:
+                # ignore empty lines
+                if not line.strip():
+                    continue
+                # add files explicitly included in the toc
+                if line.startswith(".. include::"):
+                    file_base = os.path.splitext(line.split("::")[1].strip())[0]
+                    self.items.append(
+                        self.extract_toc_entry(
+                            file_base, file_title=file_base.capitalize()
+                        )
+                    )
+                    continue
+                if line.startswith(".. toctree::"):
toctree::"): + in_toc = True + continue + # ignore directives + if ":" in line: + continue + # set tile as first line with no directive + if self.title is None: + self.title = line.strip() + if not in_toc: + continue + # bail when toc indented block is done + if not line.startswith(" ") and not line.startswith("\t"): + in_toc = False + continue + # extract entries + self.items.append(self.extract_toc_entry(line.strip())) + + def extract_toc_entry(self, file_name, file_title=None): + """ + Given the name of a file, extract the title and href for the toc entry, + and return as a dictionary + """ + # load the file to get the title + with open(f"{self.dir_name}/{file_name}.rst", "r") as f2: + if file_title is None: + # use first line as title if not provided + file_title = f2.readline().strip() + return {"name": file_title, "href": f"{file_name}.md"} + + def to_dict(self): + """ + Convert the TocSection object to a dictionary that can be written to a yaml file + """ + return {"name": self.title, "items": self.items} + + def copy_markdown(self): + """ + Copy markdown files from _build/markdown/dir_name to _build/html/docfx_yaml + + This is necessary because the markdown files in sub-directories + are not copied over by the docfx build by default + """ + for file in os.listdir("_build/markdown/" + self.dir_name): + shutil.copy( + f"_build/markdown/{self.dir_name}/{file}", + f"_build/html/docfx_yaml", + ) + + def validate_section(self, toc): + # Make sure each rst file is listed in the toc. + items_in_toc = [ + d["items"] for d in toc[0]["items"] if d["name"] == self.title and ".rst" + ][0] + items_in_dir = [f for f in os.listdir(self.dir_name) if f.endswith(".rst")] + # subtract 1 for index + assert len(items_in_toc) == len(items_in_dir) - 1 + for file in items_in_dir: + if file != self.index_file_name: + base_name, _ = os.path.splitext(file) + assert any(d["href"] == f"{base_name}.md" for d in items_in_toc) + # make sure the markdown files are present in the docfx_yaml directory + md_files = [d["href"] for d in items_in_toc] + for file in md_files: + assert os.path.exists(f"_build/html/docfx_yaml/{file}") + + +class UIDFilteredTocSection(TocSection): + def __init__(self, toc_file_path, section_name, title, uid_prefix): + """Creates a filtered section denoted by section_name in the toc_file_path to items with the given UID prefix. + + The section is then renamed to the title. + """ + current_toc = yaml.safe_load(open(toc_file_path, "r")) + self.uid_prefix = uid_prefix + + # Since we are looking for a specific section_name there should only + # be one match. + section_items = [ + d for d in current_toc[0]["items"] if d["name"] == section_name + ][0]["items"] + filtered_items = [d for d in section_items if d["uid"].startswith(uid_prefix)] + self.items = filtered_items + self.title = title + + def copy_markdown(self): + """ + No-op because we are filtering on UIDs, not markdown files. + """ + pass + + def validate_section(self, toc): + uids_in_toc = set() + + # A UID-filtered TOC tree looks like the following: + # - items: + # items: + # name: + # uid: + # + # Walk through the TOC tree to find all UIDs recursively. + def find_uids_in_items(items): + uids_in_toc.add(items["uid"]) + for subitem in items.get("items", []): + find_uids_in_items(subitem) + + items_in_toc = [d["items"] for d in toc[0]["items"] if d["name"] == self.title][ + 0 + ] + for item in items_in_toc: + find_uids_in_items(item) + + # Now that we have all the UIDs, first match all of them + # with corresponding .yml files. 
+        for uid in uids_in_toc: + assert os.path.exists(f"_build/html/docfx_yaml/{uid}.yml") + + # Also validate that every uid yml file that starts with the uid_prefix + # exists in the section. + for filename in glob.glob( + f"{self.uid_prefix}*.yml", root_dir="_build/html/docfx_yaml" + ): + assert filename[:-4] in uids_in_toc + + +def validate_toc(toc_file_path, expected_section_list, added_sections): + current_toc = yaml.safe_load(open(toc_file_path, "r")) + # make sure the set of sections matches what we expect + found_sections = [d["name"] for d in current_toc[0]["items"]] + assert ( + found_sections == expected_section_list + ), f"Expected {expected_section_list}, found {found_sections}" + # make sure each custom section is in the toc + for section in added_sections: + assert section.title in found_sections + section.validate_section(current_toc) + print("Toc validation passed") + + +if __name__ == "__main__": + # Add sections for the data_client, admin_client, and classic_client directories + toc_path = "_build/html/docfx_yaml/toc.yml" + + custom_sections = [ + TocSection(dir_name="data_client", index_file_name="data_client_usage.rst"), + UIDFilteredTocSection( + toc_file_path=toc_path, + section_name="Bigtable Admin V2", + title="Admin Client", + uid_prefix="google.cloud.bigtable_admin_v2", + ), + TocSection(dir_name="classic_client", index_file_name="usage.rst"), + ] + add_sections(toc_path, custom_sections) + # Remove the Bigtable section, since it has duplicated data + remove_sections(toc_path, ["Bigtable", "Bigtable Admin V2"]) + # run validation to make sure yaml is structured as we expect + validate_toc( + toc_file_path=toc_path, + expected_section_list=[ + "Overview", + "bigtable APIs", + "Changelog", + "Multiprocessing", + "Data Client", + "Admin Client", + "Classic Client", + ], + added_sections=custom_sections, + ) diff --git a/docs/summary_overview.md b/docs/summary_overview.md new file mode 100644 index 000000000..2379e8b6b --- /dev/null +++ b/docs/summary_overview.md @@ -0,0 +1,22 @@ +[ +This is a templated file. Adding content to this file may result in it being +reverted. Instead, if you want to place additional content, create an +"overview_content.md" file in `docs/` directory. The Sphinx tool will +pick up on the content and merge the content. +]: # + +# Cloud Bigtable API + +Overview of the APIs available for Cloud Bigtable API. + +## All entries + +Classes, methods and properties & attributes for +Cloud Bigtable API.
+ +[classes](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_class.html) + +[methods](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_method.html) + +[properties and +attributes](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_property.html) diff --git a/google/__init__.py b/google/__init__.py deleted file mode 100644 index a5ba80656..000000000 --- a/google/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - pass diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py deleted file mode 100644 index a5ba80656..000000000 --- a/google/cloud/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - pass diff --git a/google/cloud/bigtable/backup.py b/google/cloud/bigtable/backup.py index 6986d730a..f6fa24421 100644 --- a/google/cloud/bigtable/backup.py +++ b/google/cloud/bigtable/backup.py @@ -17,7 +17,7 @@ import re from google.cloud._helpers import _datetime_to_pb_timestamp # type: ignore -from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient +from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy @@ -28,7 +28,7 @@ r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)/" r"clusters/(?P[a-z][-a-z0-9]*)/" - r"backups/(?P[a-z][a-z0-9_\-]*[a-z0-9])$" + r"backups/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" ) _TABLE_NAME_RE = re.compile( @@ -106,7 +106,7 @@ def name(self): if not self._cluster: raise ValueError('"cluster" parameter must be set') - return BigtableTableAdminClient.backup_path( + return BaseBigtableTableAdminClient.backup_path( project=self._instance._client.project, instance=self._instance.instance_id, cluster=self._cluster, @@ -141,7 +141,7 @@ def parent(self): :returns: A full path to the parent cluster. """ if not self._parent and self._cluster: - self._parent = BigtableTableAdminClient.cluster_path( + self._parent = BaseBigtableTableAdminClient.cluster_path( project=self._instance._client.project, instance=self._instance.instance_id, cluster=self._cluster, @@ -163,7 +163,7 @@ def source_table(self): :returns: The Table name. """ if not self._source_table and self.table_id: - self._source_table = BigtableTableAdminClient.table_path( + self._source_table = BaseBigtableTableAdminClient.table_path( project=self._instance._client.project, instance=self._instance.instance_id, table=self.table_id, @@ -226,7 +226,7 @@ def size_bytes(self): def state(self): """The current state of this Backup. - :rtype: :class:`~google.cloud.bigtable_admin_v2.gapic.enums.Backup.State` + :rtype: :class:`~google.cloud.bigtable_admin_v2.types.table.Backup.State` :returns: The current state of this Backup. """ return self._state @@ -305,8 +305,7 @@ def create(self, cluster_id=None): created Backup. 
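        Example (illustrative sketch; IDs are placeholders)::

            operation = backup.create(cluster_id="my-cluster")
            operation.result()  # block until the backup has been created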
:rtype: :class:`~google.api_core.operation.Operation` - :returns: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` - instance, to be used to poll the status of the 'create' request + :returns: A future to be used to poll the status of the 'create' request :raises Conflict: if the Backup already exists :raises NotFound: if the Instance owning the Backup does not exist :raises BadRequest: if the `table` or `expire_time` values are invalid, @@ -412,7 +411,7 @@ def restore(self, table_id, instance_id=None): :param instance_id: (Optional) The ID of the Instance to restore the backup into, if different from the current one. - :rtype: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` + :rtype: :class:`~google.api_core.operation.Operation` :returns: A future to be used to poll the status of the 'restore' request. @@ -426,14 +425,14 @@ def restore(self, table_id, instance_id=None): """ api = self._instance._client.table_admin_client if instance_id: - parent = BigtableTableAdminClient.instance_path( + parent = BaseBigtableTableAdminClient.instance_path( project=self._instance._client.project, instance=instance_id, ) else: parent = self._instance.name - return api.restore_table( + return api._restore_table( request={"parent": parent, "table_id": table_id, "backup": self.name} ) diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py index a6eb806e9..f9b85386d 100644 --- a/google/cloud/bigtable/batcher.py +++ b/google/cloud/bigtable/batcher.py @@ -53,12 +53,19 @@ def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT self.flush_count = flush_count def get(self): - """Retrieve an item from the queue. Recalculate queue size.""" - row = self._queue.get() - mutation_size = row.get_mutations_size() - self.total_mutation_count -= len(row._get_mutations()) - self.total_size -= mutation_size - return row + """ + Retrieve an item from the queue. Recalculate queue size. + + If the queue is empty, return None. + """ + try: + row = self._queue.get_nowait() + mutation_size = row.get_mutations_size() + self.total_mutation_count -= len(row._get_mutations()) + self.total_size -= mutation_size + return row + except queue.Empty: + return None def put(self, item): """Insert an item to the queue. Recalculate queue size.""" @@ -79,9 +86,6 @@ def full(self): return True return False - def empty(self): - return self._queue.empty() - @dataclass class _BatchInfo: @@ -110,6 +114,7 @@ def __init__( self.inflight_size = 0 self.event = threading.Event() self.event.set() + self._lock = threading.Lock() def is_blocked(self): """Returns True if: @@ -128,8 +133,9 @@ def control_flow(self, batch_info): Calculate the resources used by this batch """ - self.inflight_mutations += batch_info.mutations_count - self.inflight_size += batch_info.mutations_size + with self._lock: + self.inflight_mutations += batch_info.mutations_count + self.inflight_size += batch_info.mutations_size self.set_flow_control_status() def wait(self): @@ -154,8 +160,9 @@ def release(self, batch_info): Release the resources. Decrement the row size to allow enqueued mutations to be run. """ - self.inflight_mutations -= batch_info.mutations_count - self.inflight_size -= batch_info.mutations_size + with self._lock: + self.inflight_mutations -= batch_info.mutations_count + self.inflight_size -= batch_info.mutations_size self.set_flow_control_status() @@ -292,8 +299,10 @@ def flush(self): * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
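        Example (illustrative sketch; assumes a classic-client ``table`` and a
        populated ``row``)::

            batcher = table.mutations_batcher()
            batcher.mutate(row)  # rows queue until a threshold triggers a flush
            batcher.flush()      # synchronously send everything still queued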
""" rows_to_flush = [] - while not self._rows.empty(): - rows_to_flush.append(self._rows.get()) + row = self._rows.get() + while row is not None: + rows_to_flush.append(row) + row = self._rows.get() response = self._flush_rows(rows_to_flush) return response @@ -303,58 +312,68 @@ def _flush_async(self): :raises: * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. """ - - rows_to_flush = [] - mutations_count = 0 - mutations_size = 0 - rows_count = 0 - batch_info = _BatchInfo() - - while not self._rows.empty(): - row = self._rows.get() - mutations_count += len(row._get_mutations()) - mutations_size += row.get_mutations_size() - rows_count += 1 - rows_to_flush.append(row) - batch_info.mutations_count = mutations_count - batch_info.rows_count = rows_count - batch_info.mutations_size = mutations_size - - if ( - rows_count >= self.flush_count - or mutations_size >= self.max_row_bytes - or mutations_count >= self.flow_control.max_mutations - or mutations_size >= self.flow_control.max_mutation_bytes - or self._rows.empty() # submit when it reached the end of the queue + next_row = self._rows.get() + while next_row is not None: + # start a new batch + rows_to_flush = [next_row] + batch_info = _BatchInfo( + mutations_count=len(next_row._get_mutations()), + rows_count=1, + mutations_size=next_row.get_mutations_size(), + ) + # fill up batch with rows + next_row = self._rows.get() + while next_row is not None and self._row_fits_in_batch( + next_row, batch_info ): - # wait for resources to become available, before submitting any new batch - self.flow_control.wait() - # once unblocked, submit a batch - # event flag will be set by control_flow to block subsequent thread, but not blocking this one - self.flow_control.control_flow(batch_info) - future = self._executor.submit(self._flush_rows, rows_to_flush) - self.futures_mapping[future] = batch_info - future.add_done_callback(self._batch_completed_callback) - - # reset and start a new batch - rows_to_flush = [] - mutations_size = 0 - rows_count = 0 - mutations_count = 0 - batch_info = _BatchInfo() + rows_to_flush.append(next_row) + batch_info.mutations_count += len(next_row._get_mutations()) + batch_info.rows_count += 1 + batch_info.mutations_size += next_row.get_mutations_size() + next_row = self._rows.get() + # send batch over network + # wait for resources to become available + self.flow_control.wait() + # once unblocked, submit the batch + # event flag will be set by control_flow to block subsequent thread, but not blocking this one + self.flow_control.control_flow(batch_info) + future = self._executor.submit(self._flush_rows, rows_to_flush) + # schedule release of resources from flow control + self.futures_mapping[future] = batch_info + future.add_done_callback(self._batch_completed_callback) def _batch_completed_callback(self, future): """Callback for when the mutation has finished to clean up the current batch and release items from the flow controller. - Raise exceptions if there's any. Release the resources locked by the flow control and allow enqueued tasks to be run. """ - processed_rows = self.futures_mapping[future] self.flow_control.release(processed_rows) del self.futures_mapping[future] + def _row_fits_in_batch(self, row, batch_info): + """Checks if a row can fit in the current batch. + + :type row: class + :param row: :class:`~google.cloud.bigtable.row.DirectRow`. + + :type batch_info: :class:`_BatchInfo` + :param batch_info: Information about the current batch. 
+
+        :rtype: bool
+        :returns: True if the row can fit in the current batch.
+        """ + new_rows_count = batch_info.rows_count + 1 + new_mutations_count = batch_info.mutations_count + len(row._get_mutations()) + new_mutations_size = batch_info.mutations_size + row.get_mutations_size() + return ( + new_rows_count <= self.flush_count + and new_mutations_size <= self.max_row_bytes + and new_mutations_count <= self.flow_control.max_mutations + and new_mutations_size <= self.flow_control.max_mutation_bytes + ) + def _flush_rows(self, rows_to_flush): """Mutate the specified rows. diff --git a/google/cloud/bigtable/client.py b/google/cloud/bigtable/client.py index c82a268c6..37de10b6e 100644 --- a/google/cloud/bigtable/client.py +++ b/google/cloud/bigtable/client.py @@ -32,7 +32,6 @@ import grpc # type: ignore from google.api_core.gapic_v1 import client_info as client_info_lib -import google.auth # type: ignore from google.auth.credentials import AnonymousCredentials # type: ignore from google.cloud import bigtable_v2 @@ -215,58 +214,21 @@ def _get_scopes(self): return scopes def _emulator_channel(self, transport, options): - """Create a channel using self._credentials + """Create a channel for use with the Bigtable emulator. - Works in a similar way to ``grpc.secure_channel`` but using - ``grpc.local_channel_credentials`` rather than - ``grpc.ssh_channel_credentials`` to allow easy connection to a - local emulator. + Insecure channels are used for the emulator, as secure channels + cannot be used to communicate in some environments. + https://github.com/googleapis/python-firestore/issues/359 Returns: grpc.Channel or grpc.aio.Channel """ - # TODO: Implement a special credentials type for emulator and use - # "transport.create_channel" to create gRPC channels once google-auth - # extends it's allowed credentials types. # Note: this code also exists in the firestore client. if "GrpcAsyncIOTransport" in str(transport.__name__): - return grpc.aio.secure_channel( - self._emulator_host, - self._local_composite_credentials(), - options=options, - ) + channel_fn = grpc.aio.insecure_channel else: - return grpc.secure_channel( - self._emulator_host, - self._local_composite_credentials(), - options=options, - ) - - def _local_composite_credentials(self): - """Create credentials for the local emulator channel. - - :return: grpc.ChannelCredentials - """ - credentials = google.auth.credentials.with_scopes_if_required( - self._credentials, None - ) - request = google.auth.transport.requests.Request() - - # Create the metadata plugin for inserting the authorization header. - metadata_plugin = google.auth.transport.grpc.AuthMetadataPlugin( - credentials, request - ) - - # Create a set of grpc.CallCredentials using the metadata plugin. - google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin) - - # Using the local_credentials to allow connection to emulator - local_credentials = grpc.local_channel_credentials() - - # Combine the local credentials and the authorization credentials.
- return grpc.composite_channel_credentials( - local_credentials, google_auth_credentials - ) + channel_fn = grpc.insecure_channel + return channel_fn(self._emulator_host, options=options) def _create_gapic_client_channel(self, client_class, grpc_transport): if self._emulator_host is not None: @@ -363,11 +325,11 @@ def table_admin_client(self): raise ValueError("Client is not an admin client.") transport = self._create_gapic_client_channel( - bigtable_admin_v2.BigtableTableAdminClient, + bigtable_admin_v2.BaseBigtableTableAdminClient, BigtableTableAdminGrpcTransport, ) klass = _create_gapic_client( - bigtable_admin_v2.BigtableTableAdminClient, + bigtable_admin_v2.BaseBigtableTableAdminClient, client_options=self._admin_client_options, transport=transport, ) diff --git a/google/cloud/bigtable/cluster.py b/google/cloud/bigtable/cluster.py index 11fb5492d..967ec707e 100644 --- a/google/cloud/bigtable/cluster.py +++ b/google/cloud/bigtable/cluster.py @@ -511,9 +511,11 @@ def delete(self): def _to_pb(self): """Create cluster proto buff message for API calls""" client = self._instance._client - location = client.instance_admin_client.common_location_path( - client.project, self.location_id - ) + location = None + if self.location_id: + location = client.instance_admin_client.common_location_path( + client.project, self.location_id + ) cluster_pb = instance.Cluster( location=location, diff --git a/google/cloud/bigtable/data/README.rst b/google/cloud/bigtable/data/README.rst new file mode 100644 index 000000000..8142cc34d --- /dev/null +++ b/google/cloud/bigtable/data/README.rst @@ -0,0 +1,9 @@ +Async Data Client +================= + +Synchronous API surface and usage examples coming soon + +Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com, +or through the Github `issue tracker`_. + +.. _issue tracker: https://github.com/googleapis/python-bigtable/issues diff --git a/google/cloud/bigtable/data/__init__.py b/google/cloud/bigtable/data/__init__.py new file mode 100644 index 000000000..c18eae683 --- /dev/null +++ b/google/cloud/bigtable/data/__init__.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from google.cloud.bigtable import gapic_version as package_version + +from google.cloud.bigtable.data._async.client import BigtableDataClientAsync +from google.cloud.bigtable.data._async.client import TableAsync +from google.cloud.bigtable.data._async.client import AuthorizedViewAsync +from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync +from google.cloud.bigtable.data._sync_autogen.client import BigtableDataClient +from google.cloud.bigtable.data._sync_autogen.client import Table +from google.cloud.bigtable.data._sync_autogen.client import AuthorizedView +from google.cloud.bigtable.data._sync_autogen.mutations_batcher import MutationsBatcher + +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.read_rows_query import RowRange +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.row import Cell + +from google.cloud.bigtable.data.mutations import Mutation +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import AddToCell +from google.cloud.bigtable.data.mutations import SetCell +from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn +from google.cloud.bigtable.data.mutations import DeleteAllFromFamily +from google.cloud.bigtable.data.mutations import DeleteAllFromRow + +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import FailedMutationEntryError +from google.cloud.bigtable.data.exceptions import FailedQueryShardError + +from google.cloud.bigtable.data.exceptions import RetryExceptionGroup +from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup +from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed + +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT +from google.cloud.bigtable.data._helpers import RowKeySamples +from google.cloud.bigtable.data._helpers import ShardedQuery + +# setup custom CrossSync mappings for library +from google.cloud.bigtable_v2.services.bigtable.async_client import ( + BigtableAsyncClient, +) +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync +from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync + +from google.cloud.bigtable_v2.services.bigtable.client import ( + BigtableClient, +) +from google.cloud.bigtable.data._sync_autogen._read_rows import _ReadRowsOperation +from google.cloud.bigtable.data._sync_autogen._mutate_rows import _MutateRowsOperation + +from google.cloud.bigtable.data._cross_sync import CrossSync + +CrossSync.add_mapping("GapicClient", BigtableAsyncClient) +CrossSync._Sync_Impl.add_mapping("GapicClient", BigtableClient) +CrossSync.add_mapping("_ReadRowsOperation", _ReadRowsOperationAsync) +CrossSync._Sync_Impl.add_mapping("_ReadRowsOperation", _ReadRowsOperation) +CrossSync.add_mapping("_MutateRowsOperation", _MutateRowsOperationAsync) +CrossSync._Sync_Impl.add_mapping("_MutateRowsOperation", _MutateRowsOperation) +CrossSync.add_mapping("MutationsBatcher", MutationsBatcherAsync) +CrossSync._Sync_Impl.add_mapping("MutationsBatcher", MutationsBatcher) + +__version__: str = package_version.__version__ + +__all__ = ( + "BigtableDataClientAsync", + "TableAsync", + "AuthorizedViewAsync", + "MutationsBatcherAsync", + "BigtableDataClient", + "Table", + "AuthorizedView", + "MutationsBatcher", + "RowKeySamples", + 
"ReadRowsQuery", + "RowRange", + "Mutation", + "RowMutationEntry", + "AddToCell", + "SetCell", + "DeleteRangeFromColumn", + "DeleteAllFromFamily", + "DeleteAllFromRow", + "Row", + "Cell", + "InvalidChunk", + "FailedMutationEntryError", + "FailedQueryShardError", + "RetryExceptionGroup", + "MutationsExceptionGroup", + "ShardedReadRowsExceptionGroup", + "ParameterTypeInferenceFailed", + "ShardedQuery", + "TABLE_DEFAULT", +) diff --git a/.github/.OwlBot.yaml b/google/cloud/bigtable/data/_async/__init__.py similarity index 56% rename from .github/.OwlBot.yaml rename to google/cloud/bigtable/data/_async/__init__.py index fe2f7841a..e13c9acb7 100644 --- a/.github/.OwlBot.yaml +++ b/google/cloud/bigtable/data/_async/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,17 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -docker: - image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest +from google.cloud.bigtable.data._async.client import BigtableDataClientAsync +from google.cloud.bigtable.data._async.client import TableAsync -deep-remove-regex: - - /owl-bot-staging +from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync -deep-copy-regex: - - source: /google/bigtable/admin/(v.*)/.*-py/(.*) - dest: /owl-bot-staging/bigtable_admin/$1/$2 - - source: /google/bigtable/(v.*)/.*-py/(.*) - dest: /owl-bot-staging/bigtable/$1/$2 - -begin-after-commit-hash: a21f1091413a260393548c1b2ac44b7347923f08 +__all__ = [ + "BigtableDataClientAsync", + "TableAsync", + "MutationsBatcherAsync", +] diff --git a/google/cloud/bigtable/data/_async/_mutate_rows.py b/google/cloud/bigtable/data/_async/_mutate_rows.py new file mode 100644 index 000000000..8e6833bca --- /dev/null +++ b/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -0,0 +1,229 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import Sequence, TYPE_CHECKING + +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +import google.cloud.bigtable_v2.types.bigtable as types_pb +import google.cloud.bigtable.data.exceptions as bt_exceptions +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _retry_exception_factory + +# mutate_rows requests are limited to this number of mutations +from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT +from google.cloud.bigtable.data.mutations import _EntryWithProto + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + from google.cloud.bigtable.data.mutations import RowMutationEntry + + if CrossSync.is_async: + from google.cloud.bigtable_v2.services.bigtable.async_client import ( + BigtableAsyncClient as GapicClientType, + ) + from google.cloud.bigtable.data._async.client import ( # type: ignore + _DataApiTargetAsync as TargetType, + ) + else: + from google.cloud.bigtable_v2.services.bigtable.client import ( # type: ignore + BigtableClient as GapicClientType, + ) + from google.cloud.bigtable.data._sync_autogen.client import ( # type: ignore + _DataApiTarget as TargetType, + ) + +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._mutate_rows" + + +@CrossSync.convert_class("_MutateRowsOperation") +class _MutateRowsOperationAsync: + """ + MutateRowsOperation manages the logic of sending a set of row mutations, + and retrying on failed entries. It manages this using the _run_attempt + function, which attempts to mutate all outstanding entries, and raises + _MutateRowsIncomplete if any retryable errors are encountered. + + Errors are exposed as a MutationsExceptionGroup, which contains a list of + exceptions organized by the related failed mutation entries. + + Args: + gapic_client: the client to use for the mutate_rows call + target: the table or view associated with the request + mutation_entries: a list of RowMutationEntry objects to send to the server + operation_timeout: the timeout to use for the entire operation, in seconds. + attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds. + If not specified, the request will run until operation_timeout is reached. + """ + + @CrossSync.convert + def __init__( + self, + gapic_client: GapicClientType, + target: TargetType, + mutation_entries: list["RowMutationEntry"], + operation_timeout: float, + attempt_timeout: float | None, + retryable_exceptions: Sequence[type[Exception]] = (), + ): + # check that mutations are within limits + total_mutations = sum(len(entry.mutations) for entry in mutation_entries) + if total_mutations > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT: + raise ValueError( + "mutate_rows requests can contain at most " + f"{_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations across " + f"all entries. Found {total_mutations}." 
+ ) + self._target = target + self._gapic_fn = gapic_client.mutate_rows + # create predicate for determining which errors are retryable + self.is_retryable = retries.if_exception_type( + # RPC level errors + *retryable_exceptions, + # Entry level errors + bt_exceptions._MutateRowsIncomplete, + ) + sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + self._operation = lambda: CrossSync.retry_target( + self._run_attempt, + self.is_retryable, + sleep_generator, + operation_timeout, + exception_factory=_retry_exception_factory, + ) + # initialize state + self.timeout_generator = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self.mutations = [_EntryWithProto(m, m._to_pb()) for m in mutation_entries] + self.remaining_indices = list(range(len(self.mutations))) + self.errors: dict[int, list[Exception]] = {} + + @CrossSync.convert + async def start(self): + """ + Start the operation, and run until completion + + Raises: + MutationsExceptionGroup: if any mutations failed + """ + try: + # trigger mutate_rows + await self._operation() + except Exception as exc: + # exceptions raised by retryable are added to the list of exceptions for all unfinalized mutations + incomplete_indices = self.remaining_indices.copy() + for idx in incomplete_indices: + self._handle_entry_error(idx, exc) + finally: + # raise exception detailing incomplete mutations + all_errors: list[Exception] = [] + for idx, exc_list in self.errors.items(): + if len(exc_list) == 0: + raise core_exceptions.ClientError( + f"Mutation {idx} failed with no associated errors" + ) + elif len(exc_list) == 1: + cause_exc = exc_list[0] + else: + cause_exc = bt_exceptions.RetryExceptionGroup(exc_list) + entry = self.mutations[idx].entry + all_errors.append( + bt_exceptions.FailedMutationEntryError(idx, entry, cause_exc) + ) + if all_errors: + raise bt_exceptions.MutationsExceptionGroup( + all_errors, len(self.mutations) + ) + + @CrossSync.convert + async def _run_attempt(self): + """ + Run a single attempt of the mutate_rows rpc. + + Raises: + _MutateRowsIncomplete: if there are failed mutations eligible for + retry after the attempt is complete + GoogleAPICallError: if the gapic rpc fails + """ + request_entries = [self.mutations[idx].proto for idx in self.remaining_indices] + # track mutations in this request that have not been finalized yet + active_request_indices = { + req_idx: orig_idx for req_idx, orig_idx in enumerate(self.remaining_indices) + } + self.remaining_indices = [] + if not request_entries: + # no more mutations. 
return early + return + # make gapic request + try: + result_generator = await self._gapic_fn( + request=types_pb.MutateRowsRequest( + entries=request_entries, + app_profile_id=self._target.app_profile_id, + **self._target._request_path, + ), + timeout=next(self.timeout_generator), + retry=None, + ) + async for result_list in result_generator: + for result in result_list.entries: + # convert sub-request index to global index + orig_idx = active_request_indices[result.index] + entry_error = core_exceptions.from_grpc_status( + result.status.code, + result.status.message, + details=result.status.details, + ) + if result.status.code != 0: + # mutation failed; update error list (and remaining_indices if retryable) + self._handle_entry_error(orig_idx, entry_error) + elif orig_idx in self.errors: + # mutation succeeded; remove from error list + del self.errors[orig_idx] + # remove processed entry from active list + del active_request_indices[result.index] + except Exception as exc: + # add this exception to list for each mutation that wasn't + # already handled, and update remaining_indices if mutation is retryable + for idx in active_request_indices.values(): + self._handle_entry_error(idx, exc) + # bubble up exception to be handled by retry wrapper + raise + # check if attempt succeeded, or needs to be retried + if self.remaining_indices: + # unfinished work; raise exception to trigger retry + raise bt_exceptions._MutateRowsIncomplete + + def _handle_entry_error(self, idx: int, exc: Exception): + """ + Add an exception to the list of exceptions for a given mutation index, + and add the index to the list of remaining indices if the exception is + retryable. + + Args: + idx: the index of the mutation that failed + exc: the exception to add to the list + """ + entry = self.mutations[idx].entry + self.errors.setdefault(idx, []).append(exc) + if ( + entry.is_idempotent() + and self.is_retryable(exc) + and idx not in self.remaining_indices + ): + self.remaining_indices.append(idx) diff --git a/google/cloud/bigtable/data/_async/_read_rows.py b/google/cloud/bigtable/data/_async/_read_rows.py new file mode 100644 index 000000000..8787bfa71 --- /dev/null +++ b/google/cloud/bigtable/data/_async/_read_rows.py @@ -0,0 +1,365 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from __future__ import annotations
+
+from typing import Sequence, TYPE_CHECKING
+
+from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB +from google.cloud.bigtable_v2.types import ReadRowsResponse as ReadRowsResponsePB +from google.cloud.bigtable_v2.types import RowSet as RowSetPB +from google.cloud.bigtable_v2.types import RowRange as RowRangePB + +from google.cloud.bigtable.data.row import Row, Cell +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _RowSetComplete +from google.cloud.bigtable.data.exceptions import _ResetRow +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _retry_exception_factory + +from google.api_core import retry as retries +from google.api_core.retry import exponential_sleep_generator + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + if CrossSync.is_async: + from google.cloud.bigtable.data._async.client import ( + _DataApiTargetAsync as TargetType, + ) + else: + from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget as TargetType # type: ignore + +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._read_rows" + + +@CrossSync.convert_class("_ReadRowsOperation") +class _ReadRowsOperationAsync: + """ + ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream + into a stream of Row objects. + + `chunk_stream` takes in the raw stream of ReadRowsResponse messages and breaks + it into individual cell chunks; `merge_rows` then assembles those chunks into + a stream of Row objects. + + The operation handles row merging logic end-to-end, including + performing retries on stream errors. + + Args: + query: The query to execute + target: The table or view to send the request to + operation_timeout: The total time to allow for the operation, in seconds + attempt_timeout: The time to allow for each individual attempt, in seconds + retryable_exceptions: A list of exceptions that should trigger a retry + """ + + __slots__ = ( + "attempt_timeout_gen", + "operation_timeout", + "request", + "target", + "_predicate", + "_last_yielded_row_key", + "_remaining_count", + ) + + def __init__( + self, + query: ReadRowsQuery, + target: TargetType, + operation_timeout: float, + attempt_timeout: float, + retryable_exceptions: Sequence[type[Exception]] = (), + ): + self.attempt_timeout_gen = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self.operation_timeout = operation_timeout + if isinstance(query, dict): + self.request = ReadRowsRequestPB( + **query, + **target._request_path, + app_profile_id=target.app_profile_id, + ) + else: + self.request = query._to_pb(target) + self.target = target + self._predicate = retries.if_exception_type(*retryable_exceptions) + self._last_yielded_row_key: bytes | None = None + self._remaining_count: int | None = self.request.rows_limit or None + + def start_operation(self) -> CrossSync.Iterable[Row]: + """ + Start the read_rows operation, retrying on retryable errors.
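+
+        Retries are scheduled with an exponential backoff (0.01s initial
+        delay, 60s cap) and stop once ``operation_timeout`` is exhausted.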
+ + Yields: + Row: The next row in the stream + """ + return CrossSync.retry_target_stream( + self._read_rows_attempt, + self._predicate, + exponential_sleep_generator(0.01, 60, multiplier=2), + self.operation_timeout, + exception_factory=_retry_exception_factory, + ) + + def _read_rows_attempt(self) -> CrossSync.Iterable[Row]: + """ + Attempt a single read_rows rpc call. + This function is intended to be wrapped by retry logic, + which will call this function until it succeeds or + a non-retryable error is raised. + + Yields: + Row: The next row in the stream + """ + # revise request keys and ranges between attempts + if self._last_yielded_row_key is not None: + # if this is a retry, try to trim down the request to avoid ones we've already processed + try: + self.request.rows = self._revise_request_rowset( + row_set=self.request.rows, + last_seen_row_key=self._last_yielded_row_key, + ) + except _RowSetComplete: + # if we've already seen all the rows, we're done + return self.merge_rows(None) + # revise the limit based on number of rows already yielded + if self._remaining_count is not None: + self.request.rows_limit = self._remaining_count + if self._remaining_count == 0: + return self.merge_rows(None) + # create and return a new row merger + gapic_stream = self.target.client._gapic_client.read_rows( + self.request, + timeout=next(self.attempt_timeout_gen), + retry=None, + ) + chunked_stream = self.chunk_stream(gapic_stream) + return self.merge_rows(chunked_stream) + + @CrossSync.convert() + async def chunk_stream( + self, stream: CrossSync.Awaitable[CrossSync.Iterable[ReadRowsResponsePB]] + ) -> CrossSync.Iterable[ReadRowsResponsePB.CellChunk]: + """ + process chunks out of raw read_rows stream + + Args: + stream: the raw read_rows stream from the gapic client + Yields: + ReadRowsResponsePB.CellChunk: the next chunk in the stream + """ + async for resp in await stream: + # extract proto from proto-plus wrapper + resp = resp._pb + + # handle last_scanned_row_key packets, sent when server + # has scanned past the end of the row range + if resp.last_scanned_row_key: + if ( + self._last_yielded_row_key is not None + and resp.last_scanned_row_key <= self._last_yielded_row_key + ): + raise InvalidChunk("last scanned out of order") + self._last_yielded_row_key = resp.last_scanned_row_key + + current_key = None + # process each chunk in the response + for c in resp.chunks: + if current_key is None: + current_key = c.row_key + if current_key is None: + raise InvalidChunk("first chunk is missing a row key") + elif ( + self._last_yielded_row_key + and current_key <= self._last_yielded_row_key + ): + raise InvalidChunk("row keys should be strictly increasing") + + yield c + + if c.reset_row: + current_key = None + elif c.commit_row: + # update row state after each commit + self._last_yielded_row_key = current_key + if self._remaining_count is not None: + self._remaining_count -= 1 + if self._remaining_count < 0: + raise InvalidChunk("emit count exceeds row limit") + current_key = None + + @staticmethod + @CrossSync.convert( + replace_symbols={"__aiter__": "__iter__", "__anext__": "__next__"}, + ) + async def merge_rows( + chunks: CrossSync.Iterable[ReadRowsResponsePB.CellChunk] | None, + ) -> CrossSync.Iterable[Row]: + """ + Merge chunks into rows + + Args: + chunks: the chunk stream to merge + Yields: + Row: the next row in the stream + """ + if chunks is None: + return + it = chunks.__aiter__() + # For each row + while True: + try: + c = await it.__anext__() + except CrossSync.StopIteration: + # 
stream complete + return + row_key = c.row_key + + if not row_key: + raise InvalidChunk("first row chunk is missing key") + + cells = [] + + # shared per cell storage + family: str | None = None + qualifier: bytes | None = None + + try: + # for each cell + while True: + if c.reset_row: + raise _ResetRow(c) + k = c.row_key + f = c.family_name.value + q = c.qualifier.value if c.HasField("qualifier") else None + if k and k != row_key: + raise InvalidChunk("unexpected new row key") + if f: + family = f + if q is not None: + qualifier = q + else: + raise InvalidChunk("new family without qualifier") + elif family is None: + raise InvalidChunk("missing family") + elif q is not None: + if family is None: + raise InvalidChunk("new qualifier without family") + qualifier = q + elif qualifier is None: + raise InvalidChunk("missing qualifier") + + ts = c.timestamp_micros + labels = c.labels if c.labels else [] + value = c.value + + # merge split cells + if c.value_size > 0: + buffer = [value] + while c.value_size > 0: + # throws when premature end + c = await it.__anext__() + + t = c.timestamp_micros + cl = c.labels + k = c.row_key + if ( + c.HasField("family_name") + and c.family_name.value != family + ): + raise InvalidChunk("family changed mid cell") + if ( + c.HasField("qualifier") + and c.qualifier.value != qualifier + ): + raise InvalidChunk("qualifier changed mid cell") + if t and t != ts: + raise InvalidChunk("timestamp changed mid cell") + if cl and cl != labels: + raise InvalidChunk("labels changed mid cell") + if k and k != row_key: + raise InvalidChunk("row key changed mid cell") + + if c.reset_row: + raise _ResetRow(c) + buffer.append(c.value) + value = b"".join(buffer) + cells.append( + Cell(value, row_key, family, qualifier, ts, list(labels)) + ) + if c.commit_row: + yield Row(row_key, cells) + break + c = await it.__anext__() + except _ResetRow as e: + c = e.chunk + if ( + c.row_key + or c.HasField("family_name") + or c.HasField("qualifier") + or c.timestamp_micros + or c.labels + or c.value + ): + raise InvalidChunk("reset row with data") + continue + except CrossSync.StopIteration: + raise InvalidChunk("premature end of stream") + + @staticmethod + def _revise_request_rowset( + row_set: RowSetPB, + last_seen_row_key: bytes, + ) -> RowSetPB: + """ + Revise the rows in the request to avoid ones we've already processed. 
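+
+        For example, if the request asked for keys ``[b"a", b"m"]`` plus the
+        range ``[b"a", b"z")`` and ``last_seen_row_key`` is ``b"m"``, both keys
+        are dropped and the range is narrowed to the open-start range
+        ``(b"m", b"z")``.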
+
+        Args:
+            row_set: the row set from the request
+            last_seen_row_key: the last row key encountered
+        Returns:
+            RowSetPB: the new rowset after adjusting for the last seen key
+        Raises:
+            _RowSetComplete: if there are no rows left to process after the revision
+        """ + # if user is doing a whole table scan, start a new one with the last seen key + if row_set is None or (not row_set.row_ranges and not row_set.row_keys): + last_seen = last_seen_row_key + return RowSetPB(row_ranges=[RowRangePB(start_key_open=last_seen)]) + # remove seen keys from the user-specified key list + adjusted_keys: list[bytes] = [ + k for k in row_set.row_keys if k > last_seen_row_key + ] + # adjust ranges to ignore keys before last seen + adjusted_ranges: list[RowRangePB] = [] + for row_range in row_set.row_ranges: + end_key = row_range.end_key_closed or row_range.end_key_open or None + if end_key is None or end_key > last_seen_row_key: + # end range is after last seen key + new_range = RowRangePB(row_range) + start_key = row_range.start_key_closed or row_range.start_key_open + if start_key is None or start_key <= last_seen_row_key: + # replace start key with last seen + new_range.start_key_open = last_seen_row_key + adjusted_ranges.append(new_range) + if len(adjusted_keys) == 0 and len(adjusted_ranges) == 0: + # if the query is empty after revision, raise an exception + # this will avoid an unwanted full table scan + raise _RowSetComplete() + return RowSetPB(row_keys=adjusted_keys, row_ranges=adjusted_ranges) diff --git a/google/cloud/bigtable/data/_async/_swappable_channel.py b/google/cloud/bigtable/data/_async/_swappable_channel.py new file mode 100644 index 000000000..bbc9a0d47 --- /dev/null +++ b/google/cloud/bigtable/data/_async/_swappable_channel.py @@ -0,0 +1,139 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import Callable + +from google.cloud.bigtable.data._cross_sync import CrossSync + +from grpc import ChannelConnectivity + +if CrossSync.is_async: + from grpc.aio import Channel +else: + from grpc import Channel + +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._swappable_channel" + + +@CrossSync.convert_class(sync_name="_WrappedChannel", rm_aio=True) +class _AsyncWrappedChannel(Channel): + """ + A wrapper around a gRPC channel. All methods are passed + through to the underlying channel.
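+
+    Example (illustrative sketch; the endpoint is a placeholder)::
+
+        base = grpc.aio.insecure_channel("localhost:8086")
+        wrapped = _AsyncWrappedChannel(base)
+        state = wrapped.get_state()  # passes straight through to the wrapped channel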
+ """ + + def __init__(self, channel: Channel): + self._channel = channel + + def unary_unary(self, *args, **kwargs): + return self._channel.unary_unary(*args, **kwargs) + + def unary_stream(self, *args, **kwargs): + return self._channel.unary_stream(*args, **kwargs) + + def stream_unary(self, *args, **kwargs): + return self._channel.stream_unary(*args, **kwargs) + + def stream_stream(self, *args, **kwargs): + return self._channel.stream_stream(*args, **kwargs) + + async def channel_ready(self): + return await self._channel.channel_ready() + + @CrossSync.convert( + sync_name="__enter__", replace_symbols={"__aenter__": "__enter__"} + ) + async def __aenter__(self): + await self._channel.__aenter__() + return self + + @CrossSync.convert(sync_name="__exit__", replace_symbols={"__aexit__": "__exit__"}) + async def __aexit__(self, exc_type, exc_val, exc_tb): + return await self._channel.__aexit__(exc_type, exc_val, exc_tb) + + def get_state(self, try_to_connect: bool = False) -> ChannelConnectivity: + return self._channel.get_state(try_to_connect=try_to_connect) + + async def wait_for_state_change(self, last_observed_state): + return await self._channel.wait_for_state_change(last_observed_state) + + def __getattr__(self, name): + return getattr(self._channel, name) + + async def close(self, grace=None): + if CrossSync.is_async: + return await self._channel.close(grace=grace) + else: + # grace not supported by sync version + return self._channel.close() + + if not CrossSync.is_async: + # add required sync methods + + def subscribe(self, callback, try_to_connect=False): + return self._channel.subscribe(callback, try_to_connect) + + def unsubscribe(self, callback): + return self._channel.unsubscribe(callback) + + +@CrossSync.convert_class( + sync_name="SwappableChannel", + replace_symbols={"_AsyncWrappedChannel": "_WrappedChannel"}, +) +class AsyncSwappableChannel(_AsyncWrappedChannel): + """ + Provides a grpc channel wrapper, that allows the internal channel to be swapped out + + Args: + - channel_fn: a nullary function that returns a new channel instance. + It should be a partial with all channel configuration arguments built-in + """ + + def __init__(self, channel_fn: Callable[[], Channel]): + self._channel_fn = channel_fn + self._channel = channel_fn() + + def create_channel(self) -> Channel: + """ + Create a fresh channel using the stored `channel_fn` partial + """ + new_channel = self._channel_fn() + if CrossSync.is_async: + # copy over interceptors + # this is needed because of how gapic attaches the LoggingClientAIOInterceptor + # sync channels add interceptors by wrapping, so this step isn't needed + new_channel._unary_unary_interceptors = ( + self._channel._unary_unary_interceptors + ) + new_channel._unary_stream_interceptors = ( + self._channel._unary_stream_interceptors + ) + new_channel._stream_unary_interceptors = ( + self._channel._stream_unary_interceptors + ) + new_channel._stream_stream_interceptors = ( + self._channel._stream_stream_interceptors + ) + return new_channel + + def swap_channel(self, new_channel: Channel) -> Channel: + """ + Replace the wrapped channel with a new instance. 
Typically created using `create_channel` + """ + old_channel = self._channel + self._channel = new_channel + return old_channel diff --git a/google/cloud/bigtable/data/_async/client.py b/google/cloud/bigtable/data/_async/client.py new file mode 100644 index 000000000..f86c886f0 --- /dev/null +++ b/google/cloud/bigtable/data/_async/client.py @@ -0,0 +1,1890 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import annotations + +from typing import ( + cast, + Any, + AsyncIterable, + Callable, + Optional, + Set, + Sequence, + TYPE_CHECKING, +) + +import abc +import time +import warnings +import random +import os +import concurrent.futures + +from functools import partial +from grpc import Channel + +from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType +from google.cloud.bigtable.data.execute_query.metadata import ( + SqlType, + _pb_metadata_to_metadata_types, +) +from google.cloud.bigtable.data.execute_query._parameters_formatting import ( + _format_execute_query_params, + _to_param_types, +) +from google.cloud.bigtable_v2.services.bigtable.transports.base import ( + DEFAULT_CLIENT_INFO, +) +from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysRequest +from google.cloud.bigtable_v2.types.bigtable import MutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowRequest +from google.cloud.client import ClientWithProject +from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore +from google.api_core import retry as retries +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import ServiceUnavailable +from google.api_core.exceptions import Aborted +from google.api_core.exceptions import Cancelled +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper + +import google.auth.credentials +import google.auth._default +from google.api_core import client_options as client_options_lib +from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import FailedQueryShardError +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT, _align_timeouts +from google.cloud.bigtable.data._helpers import _WarmedInstanceKey +from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.cloud.bigtable.data._helpers import _validate_timeouts +from google.cloud.bigtable.data._helpers import _get_error_type +from google.cloud.bigtable.data._helpers import _get_retryable_errors +from 
google.cloud.bigtable.data._helpers import _get_timeouts +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry + +from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule +from google.cloud.bigtable.data.row_filters import RowFilter +from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter +from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter +from google.cloud.bigtable.data.row_filters import RowFilterChain +from google.cloud.bigtable.data._metrics import BigtableClientSideMetricsController + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if CrossSync.is_async: + from grpc.aio import insecure_channel + from google.cloud.bigtable_v2.services.bigtable.transports import ( + BigtableGrpcAsyncIOTransport as TransportType, + ) + from google.cloud.bigtable_v2.services.bigtable import ( + BigtableAsyncClient as GapicClient, + ) + from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE + from google.cloud.bigtable.data._async._swappable_channel import ( + AsyncSwappableChannel as SwappableChannelType, + ) + from google.cloud.bigtable.data._async.metrics_interceptor import ( + AsyncBigtableMetricsInterceptor as MetricsInterceptorType, + ) +else: + from typing import Iterable # noqa: F401 + from grpc import insecure_channel + from grpc import intercept_channel + from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore + from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient # type: ignore + from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE + from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( # noqa: F401 + SwappableChannel as SwappableChannelType, + ) + from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( # noqa: F401 + BigtableMetricsInterceptor as MetricsInterceptorType, + ) + +if TYPE_CHECKING: + from google.cloud.bigtable.data._helpers import RowKeySamples + from google.cloud.bigtable.data._helpers import ShardedQuery + + if CrossSync.is_async: + from google.cloud.bigtable.data._async.mutations_batcher import ( + MutationsBatcherAsync, + ) + from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, + ) + else: + from google.cloud.bigtable.data._sync_autogen.mutations_batcher import ( # noqa: F401 + MutationsBatcher, + ) + from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import ( # noqa: F401 + ExecuteQueryIterator, + ) + + +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.client" + + +@CrossSync.convert_class( + sync_name="BigtableDataClient", + add_mapping_for_name="DataClient", +) +class BigtableDataClientAsync(ClientWithProject): + @CrossSync.convert( + docstring_format_vars={ + "LOOP_MESSAGE": ( + "Client should be created within an async context (running event loop)", + None, + ), + "RAISE_NO_LOOP": ( + "RuntimeError: if called outside of an async context (no running event loop)", + None, + ), + } + ) + def __init__( + self, + *, + project: str | None = None, + credentials: google.auth.credentials.Credentials | None = None, + client_options: dict[str, Any] + | "google.api_core.client_options.ClientOptions" + | None = None, + **kwargs, + ): + """ + Create a client instance for the Bigtable Data API + + {LOOP_MESSAGE} + + 
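+        Example (illustrative sketch; project, instance, and table IDs are
+        placeholders)::
+
+            async with BigtableDataClientAsync(project="my-project") as client:
+                table = client.get_table("my-instance", "my-table")
+                row = await table.read_row(b"row-key")
+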
Args:
+            project: the project which the client acts on behalf of.
+                If not passed, falls back to the default inferred
+                from the environment.
+            credentials:
+                The OAuth2 Credentials to use for this
+                client. If not passed (and if no ``_http`` object is
+                passed), falls back to the default inferred from the
+                environment.
+            client_options:
+                Client options used to set user options
+                on the client. API Endpoint should be set through client_options.
+        Raises:
+            {RAISE_NO_LOOP}
+        """
+        if "pool_size" in kwargs:
+            warnings.warn("pool_size no longer supported")
+        # set up client info headers for veneer library
+        self.client_info = DEFAULT_CLIENT_INFO
+        self.client_info.client_library_version = self._client_version()
+        # parse client options
+        if type(client_options) is dict:
+            client_options = client_options_lib.from_dict(client_options)
+        client_options = cast(
+            Optional[client_options_lib.ClientOptions], client_options
+        )
+        self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
+        if self._emulator_host is not None:
+            warnings.warn(
+                "Connecting to Bigtable emulator at {}".format(self._emulator_host),
+                RuntimeWarning,
+                stacklevel=2,
+            )
+            # use insecure channel if emulator is set
+            if credentials is None:
+                credentials = google.auth.credentials.AnonymousCredentials()
+            if project is None:
+                project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+        self._metrics_interceptor = MetricsInterceptorType()
+        # initialize client
+        ClientWithProject.__init__(
+            self,
+            credentials=credentials,
+            project=project,
+            client_options=client_options,
+        )
+        self._gapic_client = GapicClient(
+            credentials=credentials,
+            client_options=client_options,
+            client_info=self.client_info,
+            transport=lambda *args, **kwargs: TransportType(
+                *args, **kwargs, channel=self._build_grpc_channel
+            ),
+        )
+        if (
+            credentials
+            and credentials.universe_domain != self.universe_domain
+            and self._emulator_host is None
+        ):
+            # validate that the universe domain of the credentials matches the
+            # universe domain configured in client_options
+            raise ValueError(
+                f"The configured universe domain ({self.universe_domain}) does "
+                "not match the universe domain found in the credentials "
+                f"({self._credentials.universe_domain}). If you haven't "
+                "configured the universe domain explicitly, `googleapis.com` "
+                "is the default."
+            )
+        self._is_closed = CrossSync.Event()
+        self.transport = cast(TransportType, self._gapic_client.transport)
+        # keep track of active instances for warmup on channel refresh
+        self._active_instances: Set[_WarmedInstanceKey] = set()
+        # keep track of _DataApiTarget objects associated with each instance
+        # only remove instance from _active_instances when all associated targets are closed
+        self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
+        self._channel_init_time = time.monotonic()
+        self._channel_refresh_task: CrossSync.Task[None] | None = None
+        self._executor: concurrent.futures.ThreadPoolExecutor | None = (
+            concurrent.futures.ThreadPoolExecutor() if not CrossSync.is_async else None
+        )
+        if self._emulator_host is None:
+            # attempt to start background channel refresh tasks
+            try:
+                self._start_background_channel_refresh()
+            except RuntimeError:
+                warnings.warn(
+                    f"{self.__class__.__name__} should be started in an "
+                    "asyncio event loop. Channel refresh will not be started",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+
+    def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType:
+        """
+        This method is called by the gapic transport to create a grpc channel.
+
+        The init arguments passed down are captured in a partial used by SwappableChannel
+        to create new channel instances in the future, as part of the channel refresh logic
+
+        Emulators always use an insecure channel
+
+        Args:
+            - *args: positional arguments passed by the gapic layer to create a new channel with
+            - **kwargs: keyword arguments passed by the gapic layer to create a new channel with
+        Returns:
+            a custom wrapped swappable channel
+        """
+        create_channel_fn: Callable[[], Channel]
+        if self._emulator_host is not None:
+            # Emulators use insecure channels
+            create_channel_fn = partial(insecure_channel, self._emulator_host)
+        elif CrossSync.is_async:
+            # For async client, use the default create_channel.
+            create_channel_fn = partial(TransportType.create_channel, *args, **kwargs)
+        else:
+            # For sync client, wrap create_channel with interceptors.
+            def sync_create_channel_fn():
+                return intercept_channel(
+                    TransportType.create_channel(*args, **kwargs),
+                    self._metrics_interceptor,
+                )
+
+            create_channel_fn = sync_create_channel_fn
+
+        # Instantiate SwappableChannelType with the determined creation function.
+        new_channel = SwappableChannelType(create_channel_fn)
+        if CrossSync.is_async:
+            # Attach async interceptors to the channel instance itself.
+            new_channel._unary_unary_interceptors.append(self._metrics_interceptor)
+            new_channel._unary_stream_interceptors.append(self._metrics_interceptor)
+        return new_channel
+
+    @property
+    def universe_domain(self) -> str:
+        """Return the universe domain used by the client instance.
+
+        Returns:
+            str: The universe domain used by the client instance.
+        """
+        return self._gapic_client.universe_domain
+
+    @property
+    def api_endpoint(self) -> str:
+        """Return the API endpoint used by the client instance.
+
+        Returns:
+            str: The API endpoint used by the client instance.
+ """ + return self._gapic_client.api_endpoint + + @staticmethod + def _client_version() -> str: + """ + Helper function to return the client version string for this client + """ + version_str = f"{google.cloud.bigtable.__version__}-data" + if CrossSync.is_async: + version_str += "-async" + return version_str + + @CrossSync.convert( + docstring_format_vars={ + "RAISE_NO_LOOP": ( + "RuntimeError: if not called in an asyncio event loop", + "None", + ) + } + ) + def _start_background_channel_refresh(self) -> None: + """ + Starts a background task to ping and warm grpc channel + + Raises: + {RAISE_NO_LOOP} + """ + if ( + not self._channel_refresh_task + and not self._emulator_host + and not self._is_closed.is_set() + ): + # raise error if not in an event loop in async client + CrossSync.verify_async_event_loop() + self._channel_refresh_task = CrossSync.create_task( + self._manage_channel, + sync_executor=self._executor, + task_name=f"{self.__class__.__name__} channel refresh", + ) + + @CrossSync.convert + async def close(self, timeout: float | None = 2.0): + """ + Cancel all background tasks + """ + self._is_closed.set() + if self._channel_refresh_task is not None: + self._channel_refresh_task.cancel() + await CrossSync.wait([self._channel_refresh_task], timeout=timeout) + await self.transport.close() + if self._executor: + self._executor.shutdown(wait=False) + self._channel_refresh_task = None + + @CrossSync.convert + async def _ping_and_warm_instances( + self, + instance_key: _WarmedInstanceKey | None = None, + channel: Channel | None = None, + ) -> list[BaseException | None]: + """ + Prepares the backend for requests on a channel + + Pings each Bigtable instance registered in `_active_instances` on the client + + Args: + instance_key: if provided, only warm the instance associated with the key + channel: grpc channel to warm. If none, warms `self.transport.grpc_channel` + Returns: + list[BaseException | None]: sequence of results or exceptions from the ping requests + """ + channel = channel or self.transport.grpc_channel + instance_list = ( + [instance_key] if instance_key is not None else self._active_instances + ) + ping_rpc = channel.unary_unary( + "/google.bigtable.v2.Bigtable/PingAndWarm", + request_serializer=PingAndWarmRequest.serialize, + ) + # prepare list of coroutines to run + partial_list = [ + partial( + ping_rpc, + request={"name": instance_name, "app_profile_id": app_profile_id}, + metadata=[ + ( + "x-goog-request-params", + f"name={instance_name}&app_profile_id={app_profile_id}", + ) + ], + wait_for_ready=True, + ) + for (instance_name, app_profile_id) in instance_list + ] + result_list = await CrossSync.gather_partials( + partial_list, return_exceptions=True, sync_executor=self._executor + ) + return [r or None for r in result_list] + + def _invalidate_channel_stubs(self): + """Helper to reset the cached stubs. 
Needed when changing out the grpc channel""" + self.transport._stubs = {} + self.transport._prep_wrapped_messages(self.client_info) + + @CrossSync.convert + async def _manage_channel( + self, + refresh_interval_min: float = 60 * 35, + refresh_interval_max: float = 60 * 45, + grace_period: float = 60 * 10, + ) -> None: + """ + Background task that periodically refreshes and warms a grpc channel + + The backend will automatically close channels after 60 minutes, so + `refresh_interval` + `grace_period` should be < 60 minutes + + Runs continuously until the client is closed + + Args: + refresh_interval_min: minimum interval before initiating refresh + process in seconds. Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + refresh_interval_max: maximum interval before initiating refresh + process in seconds. Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + grace_period: time to allow previous channel to serve existing + requests before closing, in seconds + """ + if not isinstance(self.transport.grpc_channel, SwappableChannelType): + warnings.warn("Channel does not support auto-refresh.") + return + super_channel: SwappableChannelType = self.transport.grpc_channel + first_refresh = self._channel_init_time + random.uniform( + refresh_interval_min, refresh_interval_max + ) + next_sleep = max(first_refresh - time.monotonic(), 0) + if next_sleep > 0: + # warm the current channel immediately + await self._ping_and_warm_instances(channel=super_channel) + # continuously refresh the channel every `refresh_interval` seconds + while not self._is_closed.is_set(): + await CrossSync.event_wait( + self._is_closed, + next_sleep, + async_break_early=False, # no need to interrupt sleep. Task will be cancelled on close + ) + if self._is_closed.is_set(): + # don't refresh if client is closed + break + start_timestamp = time.monotonic() + # prepare new channel for use + new_channel = super_channel.create_channel() + await self._ping_and_warm_instances(channel=new_channel) + # cycle channel out of use, with long grace window before closure + old_channel = super_channel.swap_channel(new_channel) + self._invalidate_channel_stubs() + # give old_channel a chance to complete existing rpcs + if grace_period: + await CrossSync.event_wait( + self._is_closed, grace_period, async_break_early=False + ) + await old_channel.close() + # subtract the time spent waiting for the channel to be replaced + next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) + next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0) + + @CrossSync.convert( + replace_symbols={ + "TableAsync": "Table", + "ExecuteQueryIteratorAsync": "ExecuteQueryIterator", + "_DataApiTargetAsync": "_DataApiTarget", + } + ) + async def _register_instance( + self, + instance_id: str, + app_profile_id: Optional[str], + owner_id: int, + ) -> None: + """ + Registers an instance with the client, and warms the channel for the instance + The client will periodically refresh grpc channel used to make + requests, and new channels will be warmed for each registered instance + Channels will not be refreshed unless at least one instance is registered + + Args: + instance_id: id of the instance to register. + app_profile_id: id of the app profile calling the instance. + owner_id: integer id of the object owning the instance. 
Owners will be tracked in
+                _instance_owners, and instances will only be unregistered when all
+                owners call _remove_instance_registration. Can be obtained by calling
+                the `id` identity function, using `id(owner)`
+        """
+        instance_name = self._gapic_client.instance_path(self.project, instance_id)
+        instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+        self._instance_owners.setdefault(instance_key, set()).add(owner_id)
+        if instance_key not in self._active_instances:
+            self._active_instances.add(instance_key)
+            if self._channel_refresh_task:
+                # refresh tasks already running
+                # call ping and warm on all existing channels
+                await self._ping_and_warm_instances(instance_key)
+            else:
+                # refresh tasks aren't active. start them as background tasks
+                self._start_background_channel_refresh()
+
+    @CrossSync.convert(
+        replace_symbols={
+            "TableAsync": "Table",
+            "ExecuteQueryIteratorAsync": "ExecuteQueryIterator",
+            "_DataApiTargetAsync": "_DataApiTarget",
+        }
+    )
+    def _remove_instance_registration(
+        self,
+        instance_id: str,
+        app_profile_id: Optional[str],
+        owner_id: int,
+    ) -> bool:
+        """
+        Removes an instance from the client's registered instances, to prevent
+        warming new channels for the instance
+
+        If instance_id is not registered, or is still in use by other tables, returns False
+
+        Args:
+            instance_id: id of the instance to remove
+            app_profile_id: id of the app profile calling the instance.
+            owner_id: integer id of the object owning the instance. Can be
+                obtained by calling the `id` identity function, using `id(owner)`.
+        Returns:
+            bool: True if instance was removed, else False
+        """
+        instance_name = self._gapic_client.instance_path(self.project, instance_id)
+        instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+        owner_list = self._instance_owners.get(instance_key, set())
+        try:
+            owner_list.remove(owner_id)
+            if len(owner_list) == 0:
+                self._active_instances.remove(instance_key)
+            return True
+        except KeyError:
+            return False
+
+    @CrossSync.convert(
+        replace_symbols={"TableAsync": "Table"},
+        docstring_format_vars={
+            "LOOP_MESSAGE": (
+                "Must be created within an async context (running event loop)",
+                "",
+            ),
+            "RAISE_NO_LOOP": (
+                "RuntimeError: if called outside of an async context (no running event loop)",
+                "None",
+            ),
+        },
+    )
+    def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAsync:
+        """
+        Returns a table instance for making data API requests. All arguments are passed
+        directly to the TableAsync constructor.
+
+        {LOOP_MESSAGE}
+
+        Args:
+            instance_id: The Bigtable instance ID to associate with this client.
+                instance_id is combined with the client's project to fully
+                specify the instance
+            table_id: The ID of the table. table_id is combined with the
+                instance_id and the client's project to fully specify the table
+            app_profile_id: The app profile to associate with requests.
+                https://cloud.google.com/bigtable/docs/app-profiles
+            default_read_rows_operation_timeout: The default timeout for read rows
+                operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+            default_read_rows_attempt_timeout: The default timeout for individual
+                read rows rpc requests, in seconds. If not set, defaults to 20 seconds
+            default_mutate_rows_operation_timeout: The default timeout for mutate rows
+                operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+            default_mutate_rows_attempt_timeout: The default timeout for individual
+                mutate rows rpc requests, in seconds.
If not set, defaults to 60 seconds
+            default_operation_timeout: The default timeout for all other operations, in
+                seconds. If not set, defaults to 60 seconds
+            default_attempt_timeout: The default timeout for all other individual rpc
+                requests, in seconds. If not set, defaults to 20 seconds
+            default_read_rows_retryable_errors: a list of errors that will be retried
+                if encountered during read_rows and related operations.
+                Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+            default_mutate_rows_retryable_errors: a list of errors that will be retried
+                if encountered during mutate_rows and related operations.
+                Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+            default_retryable_errors: a list of errors that will be retried if
+                encountered during all other operations.
+                Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+        Returns:
+            TableAsync: a table instance for making data API requests
+        Raises:
+            {RAISE_NO_LOOP}
+        """
+        return TableAsync(self, instance_id, table_id, *args, **kwargs)
+
+    @CrossSync.convert(
+        replace_symbols={"AuthorizedViewAsync": "AuthorizedView"},
+        docstring_format_vars={
+            "LOOP_MESSAGE": (
+                "Must be created within an async context (running event loop)",
+                "",
+            ),
+            "RAISE_NO_LOOP": (
+                "RuntimeError: if called outside of an async context (no running event loop)",
+                "None",
+            ),
+        },
+    )
+    def get_authorized_view(
+        self, instance_id: str, table_id: str, authorized_view_id: str, *args, **kwargs
+    ) -> AuthorizedViewAsync:
+        """
+        Returns an authorized view instance for making data API requests. All arguments are passed
+        directly to the AuthorizedViewAsync constructor.
+
+        {LOOP_MESSAGE}
+
+        Args:
+            instance_id: The Bigtable instance ID to associate with this client.
+                instance_id is combined with the client's project to fully
+                specify the instance
+            table_id: The ID of the table. table_id is combined with the
+                instance_id and the client's project to fully specify the table
+            authorized_view_id: The id for the authorized view to use for requests
+            app_profile_id: The app profile to associate with requests.
+                https://cloud.google.com/bigtable/docs/app-profiles
+            default_read_rows_operation_timeout: The default timeout for read rows
+                operations, in seconds. If not set, defaults to Table's value
+            default_read_rows_attempt_timeout: The default timeout for individual
+                read rows rpc requests, in seconds. If not set, defaults to Table's value
+            default_mutate_rows_operation_timeout: The default timeout for mutate rows
+                operations, in seconds. If not set, defaults to Table's value
+            default_mutate_rows_attempt_timeout: The default timeout for individual
+                mutate rows rpc requests, in seconds. If not set, defaults to Table's value
+            default_operation_timeout: The default timeout for all other operations, in
+                seconds. If not set, defaults to Table's value
+            default_attempt_timeout: The default timeout for all other individual rpc
+                requests, in seconds. If not set, defaults to Table's value
+            default_read_rows_retryable_errors: a list of errors that will be retried
+                if encountered during read_rows and related operations. If not set,
+                defaults to Table's value
+            default_mutate_rows_retryable_errors: a list of errors that will be retried
+                if encountered during mutate_rows and related operations. If not set,
+                defaults to Table's value
+            default_retryable_errors: a list of errors that will be retried if
+                encountered during all other operations.
If not set, defaults to
+                Table's value
+        Returns:
+            AuthorizedViewAsync: an authorized view instance for making data API requests
+        Raises:
+            {RAISE_NO_LOOP}
+        """
+        return CrossSync.AuthorizedView(
+            self,
+            instance_id,
+            table_id,
+            authorized_view_id,
+            *args,
+            **kwargs,
+        )
+
+    @CrossSync.convert(
+        replace_symbols={"ExecuteQueryIteratorAsync": "ExecuteQueryIterator"}
+    )
+    async def execute_query(
+        self,
+        query: str,
+        instance_id: str,
+        *,
+        parameters: dict[str, ExecuteQueryValueType] | None = None,
+        parameter_types: dict[str, SqlType.Type] | None = None,
+        app_profile_id: str | None = None,
+        operation_timeout: float = 600,
+        attempt_timeout: float | None = 20,
+        retryable_errors: Sequence[type[Exception]] = (
+            DeadlineExceeded,
+            ServiceUnavailable,
+            Aborted,
+        ),
+        prepare_operation_timeout: float = 60,
+        prepare_attempt_timeout: float | None = 20,
+        prepare_retryable_errors: Sequence[type[Exception]] = (
+            DeadlineExceeded,
+            ServiceUnavailable,
+        ),
+        column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+    ) -> "ExecuteQueryIteratorAsync":
+        """
+        Executes an SQL query on an instance.
+        Returns an iterator to asynchronously stream back columns from selected rows.
+
+        Failed requests within operation_timeout will be retried based on the
+        retryable_errors list until operation_timeout is reached.
+
+        Note that this makes two requests, one to ``PrepareQuery`` and one to ``ExecuteQuery``.
+        These have separate retry configurations. ``ExecuteQuery`` is where the bulk of the
+        work happens.
+
+        Args:
+            query: Query to be run on Bigtable instance. The query can use ``@param``
+                placeholders to use parameter interpolation on the server. Values for all
+                parameters should be provided in ``parameters``. Types of parameters are
+                inferred but should be provided in ``parameter_types`` if the inference is
+                not possible (i.e. when value can be None, an empty list or an empty dict).
+            instance_id: The Bigtable instance ID to perform the query on.
+                instance_id is combined with the client's project to fully
+                specify the instance.
+            parameters: Dictionary with values for all parameters used in the ``query``.
+            parameter_types: Dictionary with types of parameters used in the ``query``.
+                Required to contain entries only for parameters whose type cannot be
+                detected automatically (i.e. the value can be None, an empty list or
+                an empty dict).
+            app_profile_id: The app profile to associate with requests.
+                https://cloud.google.com/bigtable/docs/app-profiles
+            operation_timeout: the time budget for the entire executeQuery operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to 600 seconds.
+            attempt_timeout: the time budget for an individual executeQuery network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to 20 seconds.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered during executeQuery.
+                Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+            prepare_operation_timeout: the time budget for the entire prepareQuery operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to 60 seconds.
+            prepare_attempt_timeout: the time budget for an individual prepareQuery network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to 20 seconds.
+                If None, defaults to prepare_operation_timeout.
+            prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery.
+                Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+            column_info: (Optional) A dictionary mapping column names to Protobuf message classes or EnumTypeWrapper objects.
+                This dictionary provides the necessary type information for deserializing PROTO and
+                ENUM column values from the query results. When an entry is provided
+                for a PROTO or ENUM column, the client library will attempt to deserialize the raw data.
+
+                - For PROTO columns: The value in the dictionary should be the
+                  Protobuf Message class (e.g., ``my_pb2.MyMessage``).
+                - For ENUM columns: The value should be the Protobuf EnumTypeWrapper
+                  object (e.g., ``my_pb2.MyEnum``).
+
+                Example::
+
+                    import my_pb2
+
+                    column_info = {
+                        "my_proto_column": my_pb2.MyMessage,
+                        "my_enum_column": my_pb2.MyEnum
+                    }
+
+                If ``column_info`` is not provided, or if a specific column name is not found
+                in the dictionary:
+
+                - PROTO columns will be returned as raw bytes.
+                - ENUM columns will be returned as integers.
+
+                Note for Nested PROTO or ENUM Fields:
+
+                To specify types for PROTO or ENUM fields within STRUCTs or MAPs, use a dot-separated
+                path from the top-level column name.
+
+                - For STRUCTs: ``struct_column_name.field_name``
+                - For MAPs: ``map_column_name.key`` or ``map_column_name.value`` to specify types
+                  for the map keys or values, respectively.
+
+                Example::
+
+                    import my_pb2
+
+                    column_info = {
+                        # Top-level column
+                        "my_proto_column": my_pb2.MyMessage,
+                        "my_enum_column": my_pb2.MyEnum,
+
+                        # Nested field in a STRUCT column named 'my_struct'
+                        "my_struct.nested_proto_field": my_pb2.OtherMessage,
+                        "my_struct.nested_enum_field": my_pb2.AnotherEnum,
+
+                        # Nested field in a MAP column named 'my_map'
+                        "my_map.key": my_pb2.MapKeyEnum,  # If map keys were enums
+                        "my_map.value": my_pb2.MapValueMessage,
+
+                        # PROTO field inside a STRUCT, where the STRUCT is the value in a MAP column
+                        "struct_map.value.nested_proto_field": my_pb2.DeeplyNestedProto,
+                        "struct_map.value.nested_enum_field": my_pb2.DeeplyNestedEnum
+                    }
+
+        Returns:
+            ExecuteQueryIteratorAsync: an asynchronous iterator that yields rows returned by the query
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+            google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if
+                a parameter is passed without an explicit type, and the type cannot be inferred
+            google.protobuf.message.DecodeError: raised if the deserialization of a PROTO/ENUM value fails.
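+
+        Example (an illustrative sketch, not from the library docs; the instance,
+        table, and column names are placeholders)::
+
+            result_iterator = await client.execute_query(
+                "SELECT _key FROM my_table LIMIT @limit",
+                "my-instance",
+                parameters={"limit": 10},
+            )
+            async for row in result_iterator:
+                print(row["_key"])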
+ """ + instance_name = self._gapic_client.instance_path(self.project, instance_id) + converted_param_types = _to_param_types(parameters, parameter_types) + prepare_request = { + "instance_name": instance_name, + "query": query, + "app_profile_id": app_profile_id, + "param_types": converted_param_types, + "proto_format": {}, + } + prepare_predicate = retries.if_exception_type( + *[_get_error_type(e) for e in prepare_retryable_errors] + ) + prepare_operation_timeout, prepare_attempt_timeout = _align_timeouts( + prepare_operation_timeout, prepare_attempt_timeout + ) + prepare_sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + + target = partial( + self._gapic_client.prepare_query, + request=prepare_request, + timeout=prepare_attempt_timeout, + retry=None, + ) + prepare_result = await CrossSync.retry_target( + target, + prepare_predicate, + prepare_sleep_generator, + prepare_operation_timeout, + exception_factory=_retry_exception_factory, + ) + + prepare_metadata = _pb_metadata_to_metadata_types(prepare_result.metadata) + + retryable_excs = [_get_error_type(e) for e in retryable_errors] + + pb_params = _format_execute_query_params(parameters, parameter_types) + + request_body = { + "instance_name": instance_name, + "app_profile_id": app_profile_id, + "prepared_query": prepare_result.prepared_query, + "params": pb_params, + } + operation_timeout, attempt_timeout = _align_timeouts( + operation_timeout, attempt_timeout + ) + + return CrossSync.ExecuteQueryIterator( + self, + instance_id, + app_profile_id, + request_body, + prepare_metadata, + attempt_timeout, + operation_timeout, + retryable_excs=retryable_excs, + column_info=column_info, + ) + + @CrossSync.convert(sync_name="__enter__") + async def __aenter__(self): + self._start_background_channel_refresh() + return self + + @CrossSync.convert(sync_name="__exit__", replace_symbols={"__aexit__": "__exit__"}) + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + await self._gapic_client.__aexit__(exc_type, exc_val, exc_tb) + + +@CrossSync.convert_class(sync_name="_DataApiTarget") +class _DataApiTargetAsync(abc.ABC): + """ + Abstract class containing API surface for BigtableDataClient. 
Should not be created directly + + Can be instantiated as a Table or an AuthorizedView + """ + + @CrossSync.convert( + replace_symbols={"BigtableDataClientAsync": "BigtableDataClient"}, + docstring_format_vars={ + "LOOP_MESSAGE": ( + "Must be created within an async context (running event loop)", + "", + ), + "RAISE_NO_LOOP": ( + "RuntimeError: if called outside of an async context (no running event loop)", + "None", + ), + }, + ) + def __init__( + self, + client: BigtableDataClientAsync, + instance_id: str, + table_id: str, + app_profile_id: str | None = None, + *, + default_read_rows_operation_timeout: float = 600, + default_read_rows_attempt_timeout: float | None = 20, + default_mutate_rows_operation_timeout: float = 600, + default_mutate_rows_attempt_timeout: float | None = 60, + default_operation_timeout: float = 60, + default_attempt_timeout: float | None = 20, + default_read_rows_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + Aborted, + Cancelled, + ), + default_mutate_rows_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), + default_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), + ): + """ + Initialize a Table instance + + {LOOP_MESSAGE} + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. 
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Raises: + {RAISE_NO_LOOP} + """ + # NOTE: any changes to the signature of this method should also be reflected + # in client.get_table() + # validate timeouts + _validate_timeouts( + default_operation_timeout, default_attempt_timeout, allow_none=True + ) + _validate_timeouts( + default_read_rows_operation_timeout, + default_read_rows_attempt_timeout, + allow_none=True, + ) + _validate_timeouts( + default_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout, + allow_none=True, + ) + + self.client = client + self.instance_id = instance_id + self.instance_name = self.client._gapic_client.instance_path( + self.client.project, instance_id + ) + self.table_id = table_id + self.table_name = self.client._gapic_client.table_path( + self.client.project, instance_id, table_id + ) + self.app_profile_id: str | None = app_profile_id + + self.default_operation_timeout: float = default_operation_timeout + self.default_attempt_timeout: float | None = default_attempt_timeout + self.default_read_rows_operation_timeout: float = ( + default_read_rows_operation_timeout + ) + self.default_read_rows_attempt_timeout: float | None = ( + default_read_rows_attempt_timeout + ) + self.default_mutate_rows_operation_timeout: float = ( + default_mutate_rows_operation_timeout + ) + self.default_mutate_rows_attempt_timeout: float | None = ( + default_mutate_rows_attempt_timeout + ) + + self.default_read_rows_retryable_errors: Sequence[type[Exception]] = ( + default_read_rows_retryable_errors or () + ) + self.default_mutate_rows_retryable_errors: Sequence[type[Exception]] = ( + default_mutate_rows_retryable_errors or () + ) + self.default_retryable_errors: Sequence[type[Exception]] = ( + default_retryable_errors or () + ) + + self._metrics = BigtableClientSideMetricsController() + + try: + self._register_instance_future = CrossSync.create_task( + self.client._register_instance, + self.instance_id, + self.app_profile_id, + id(self), + sync_executor=self.client._executor, + ) + except RuntimeError as e: + raise RuntimeError( + f"{self.__class__.__name__} must be created within an async event loop context." + ) from e + + @property + @abc.abstractmethod + def _request_path(self) -> dict[str, str]: + """ + Used to populate table_name or authorized_view_name for rpc requests, depending on the subclass + + Unimplemented in base class + """ + raise NotImplementedError + + def __str__(self): + path_str = list(self._request_path.values())[0] if self._request_path else "" + return f"{self.__class__.__name__}<{path_str!r}>" + + @CrossSync.convert(replace_symbols={"AsyncIterable": "Iterable"}) + async def read_rows_stream( + self, + query: ReadRowsQuery, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> AsyncIterable[Row]: + """ + Read a set of rows from the table, based on the specified query. + Returns an iterator to asynchronously stream back row data. + + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + query: contains details about which rows to return + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. 
Defaults to the Table's default_read_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors
+        Returns:
+            AsyncIterable[Row]: an asynchronous iterator that yields rows returned by the query
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        operation_timeout, attempt_timeout = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+        retryable_excs = _get_retryable_errors(retryable_errors, self)
+
+        row_merger = CrossSync._ReadRowsOperation(
+            query,
+            self,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_exceptions=retryable_excs,
+        )
+        return row_merger.start_operation()
+
+    @CrossSync.convert
+    async def read_rows(
+        self,
+        query: ReadRowsQuery,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> list[Row]:
+        """
+        Read a set of rows from the table, based on the specified query.
+        Returns results as a list of Row objects when the request is complete.
+        For streamed results, use read_rows_stream.
+
+        Failed requests within operation_timeout will be retried based on the
+        retryable_errors list until operation_timeout is reached.
+
+        Args:
+            query: contains details about which rows to return
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_read_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors.
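+
+        Example (an illustrative sketch; assumes an existing ``table`` instance and
+        placeholder row keys)::
+
+            query = ReadRowsQuery(row_keys=[b"key-1", b"key-2"])
+            rows = await table.read_rows(query)
+            for row in rows:
+                print(row.row_key)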
Returns:
+            list[Row]: a list of Rows returned by the query
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        row_generator = await self.read_rows_stream(
+            query,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_errors=retryable_errors,
+        )
+        return [row async for row in row_generator]
+
+    @CrossSync.convert
+    async def read_row(
+        self,
+        row_key: str | bytes,
+        *,
+        row_filter: RowFilter | None = None,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> Row | None:
+        """
+        Read a single row from the table, based on the specified key.
+
+        Failed requests within operation_timeout will be retried based on the
+        retryable_errors list until operation_timeout is reached.
+
+        Args:
+            row_key: the key of the row to read
+            row_filter: an optional filter to apply to the contents of the row
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_read_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors.
+        Returns:
+            Row | None: a Row object if the row exists, otherwise None
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        if row_key is None:
+            raise ValueError("row_key must be string or bytes")
+        query = ReadRowsQuery(row_keys=row_key, row_filter=row_filter, limit=1)
+        results = await self.read_rows(
+            query,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_errors=retryable_errors,
+        )
+        if len(results) == 0:
+            return None
+        return results[0]
+
+    @CrossSync.convert
+    async def read_rows_sharded(
+        self,
+        sharded_query: ShardedQuery,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> list[Row]:
+        """
+        Runs a sharded query in parallel, then returns the results in a single list.
+        Results will be returned in the order of the input queries.
+
+        This function is intended to be run on the results of a query.shard() call.
+        For example::
+
+            table_shard_keys = await table.sample_row_keys()
+            query = ReadRowsQuery(...)
+            shard_queries = query.shard(table_shard_keys)
+            results = await table.read_rows_sharded(shard_queries)
+
+        Args:
+            sharded_query: a sharded query to execute
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_read_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors.
+        Returns:
+            list[Row]: a list of Rows returned by the query
+        Raises:
+            ShardedReadRowsExceptionGroup: if any of the queries failed
+            ValueError: if the query_list is empty
+        """
+        if not sharded_query:
+            raise ValueError("empty sharded_query")
+        operation_timeout, attempt_timeout = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+        # make sure each rpc stays within overall operation timeout
+        rpc_timeout_generator = _attempt_timeout_generator(
+            operation_timeout, operation_timeout
+        )
+
+        # limit the number of concurrent requests using a semaphore
+        concurrency_sem = CrossSync.Semaphore(_CONCURRENCY_LIMIT)
+
+        @CrossSync.convert
+        async def read_rows_with_semaphore(query):
+            async with concurrency_sem:
+                # calculate new timeout based on time left in overall operation
+                shard_timeout = next(rpc_timeout_generator)
+                if shard_timeout <= 0:
+                    raise DeadlineExceeded(
+                        "Operation timeout exceeded before starting query"
+                    )
+                return await self.read_rows(
+                    query,
+                    operation_timeout=shard_timeout,
+                    attempt_timeout=min(attempt_timeout, shard_timeout),
+                    retryable_errors=retryable_errors,
+                )
+
+        routine_list = [
+            partial(read_rows_with_semaphore, query) for query in sharded_query
+        ]
+        batch_result = await CrossSync.gather_partials(
+            routine_list,
+            return_exceptions=True,
+            sync_executor=self.client._executor,
+        )
+
+        # collect results and errors
+        error_dict = {}
+        shard_idx = 0
+        results_list = []
+        for result in batch_result:
+            if isinstance(result, Exception):
+                error_dict[shard_idx] = result
+            elif isinstance(result, BaseException):
+                # BaseException not expected; raise immediately
+                raise result
+            else:
+                results_list.extend(result)
+            shard_idx += 1
+        if error_dict:
+            # if any sub-request failed, raise an exception instead of returning results
+            raise ShardedReadRowsExceptionGroup(
+                [
+                    FailedQueryShardError(idx, sharded_query[idx], e)
+                    for idx, e in error_dict.items()
+                ],
+                results_list,
+                len(sharded_query),
+            )
+        return results_list
+
+    @CrossSync.convert
+    async def row_exists(
+        self,
+        row_key: str | bytes,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> bool:
+        """
+        Return a boolean indicating whether the specified row exists in the table.
+        Uses the filters: chain(limit cells per row = 1, strip value)
+
+        Args:
+            row_key: the key of the row to check
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_read_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors.
+        Returns:
+            bool: a bool indicating whether the row exists
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        if row_key is None:
+            raise ValueError("row_key must be string or bytes")
+
+        strip_filter = StripValueTransformerFilter(flag=True)
+        limit_filter = CellsRowLimitFilter(1)
+        chain_filter = RowFilterChain(filters=[limit_filter, strip_filter])
+        query = ReadRowsQuery(row_keys=row_key, limit=1, row_filter=chain_filter)
+        results = await self.read_rows(
+            query,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_errors=retryable_errors,
+        )
+        return len(results) > 0
+
+    @CrossSync.convert
+    async def sample_row_keys(
+        self,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+    ) -> RowKeySamples:
+        """
+        Return a set of RowKeySamples that delimit contiguous sections of the table of
+        approximately equal size
+
+        RowKeySamples output can be used with ReadRowsQuery.shard() to create a sharded query that
+        can be parallelized across multiple backend nodes. read_rows and read_rows_stream
+        requests will call sample_row_keys internally for this purpose when sharding is enabled
+
+        RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of
+        row_keys, along with offset positions in the table
+
+        Args:
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_retryable_errors.
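+
+        Example (an illustrative sketch of inspecting the returned samples; assumes
+        an existing ``table`` instance)::
+
+            samples = await table.sample_row_keys()
+            for row_key, offset_bytes in samples:
+                print(row_key, offset_bytes)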
Returns:
+            RowKeySamples: a set of RowKeySamples that delimit contiguous sections of the table
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        # prepare timeouts
+        operation_timeout, attempt_timeout = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+        attempt_timeout_gen = _attempt_timeout_generator(
+            attempt_timeout, operation_timeout
+        )
+        # prepare retryable
+        retryable_excs = _get_retryable_errors(retryable_errors, self)
+        predicate = retries.if_exception_type(*retryable_excs)
+
+        sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+
+        @CrossSync.convert
+        async def execute_rpc():
+            results = await self.client._gapic_client.sample_row_keys(
+                request=SampleRowKeysRequest(
+                    app_profile_id=self.app_profile_id, **self._request_path
+                ),
+                timeout=next(attempt_timeout_gen),
+                retry=None,
+            )
+            return [(s.row_key, s.offset_bytes) async for s in results]
+
+        return await CrossSync.retry_target(
+            execute_rpc,
+            predicate,
+            sleep_generator,
+            operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+
+    @CrossSync.convert(replace_symbols={"MutationsBatcherAsync": "MutationsBatcher"})
+    def mutations_batcher(
+        self,
+        *,
+        flush_interval: float | None = 5,
+        flush_limit_mutation_count: int | None = 1000,
+        flush_limit_bytes: int = 20 * _MB_SIZE,
+        flow_control_max_mutation_count: int = 100_000,
+        flow_control_max_bytes: int = 100 * _MB_SIZE,
+        batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        batch_retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+    ) -> "MutationsBatcherAsync":
+        """
+        Returns a new mutations batcher instance.
+
+        Can be used to iteratively add mutations that are flushed as a group,
+        to avoid excess network calls
+
+        Args:
+            flush_interval: Automatically flush every flush_interval seconds. If None,
+                a table default will be used
+            flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
+                mutations are added across all entries. If None, this limit is ignored.
+            flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added.
+            flow_control_max_mutation_count: Maximum number of inflight mutations.
+            flow_control_max_bytes: Maximum number of inflight bytes.
+            batch_operation_timeout: timeout for each mutate_rows operation, in seconds.
+                Defaults to the Table's default_mutate_rows_operation_timeout
+            batch_attempt_timeout: timeout for each individual request, in seconds.
+                Defaults to the Table's default_mutate_rows_attempt_timeout.
+                If None, defaults to batch_operation_timeout.
+            batch_retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_mutate_rows_retryable_errors.
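+
+        Example (an illustrative sketch; the row key, family, and qualifier are
+        placeholders, and SetCell is assumed to be imported from
+        google.cloud.bigtable.data.mutations)::
+
+            async with table.mutations_batcher() as batcher:
+                await batcher.append(
+                    RowMutationEntry(b"row-key", [SetCell("family", "qualifier", b"value")])
+                )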
Returns:
+            MutationsBatcherAsync: a MutationsBatcherAsync context manager that can batch requests
+        """
+        return CrossSync.MutationsBatcher(
+            self,
+            flush_interval=flush_interval,
+            flush_limit_mutation_count=flush_limit_mutation_count,
+            flush_limit_bytes=flush_limit_bytes,
+            flow_control_max_mutation_count=flow_control_max_mutation_count,
+            flow_control_max_bytes=flow_control_max_bytes,
+            batch_operation_timeout=batch_operation_timeout,
+            batch_attempt_timeout=batch_attempt_timeout,
+            batch_retryable_errors=batch_retryable_errors,
+        )
+
+    @CrossSync.convert
+    async def mutate_row(
+        self,
+        row_key: str | bytes,
+        mutations: list[Mutation] | Mutation,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+    ):
+        """
+        Mutates a row atomically.
+
+        Cells already present in the row are left unchanged unless explicitly changed
+        by ``mutations``.
+
+        Idempotent operations (i.e., all mutations have an explicit timestamp) will be
+        retried on server failure. Non-idempotent operations will not.
+
+        Args:
+            row_key: the row to apply mutations to
+            mutations: the set of mutations to apply to the row
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Only idempotent mutations will be retried. Defaults to the Table's
+                default_retryable_errors.
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing all
+                GoogleAPIError exceptions from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised on non-idempotent operations that cannot be
+                safely retried.
+            ValueError: if invalid arguments are provided
+        """
+        operation_timeout, attempt_timeout = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+
+        if not mutations:
+            raise ValueError("No mutations provided")
+        mutations_list = mutations if isinstance(mutations, list) else [mutations]
+
+        if all(mutation.is_idempotent() for mutation in mutations_list):
+            # mutations are all idempotent and safe to retry
+            predicate = retries.if_exception_type(
+                *_get_retryable_errors(retryable_errors, self)
+            )
+        else:
+            # mutations should not be retried
+            predicate = retries.if_exception_type()
+
+        sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+
+        target = partial(
+            self.client._gapic_client.mutate_row,
+            request=MutateRowRequest(
+                row_key=row_key.encode("utf-8")
+                if isinstance(row_key, str)
+                else row_key,
+                mutations=[mutation._to_pb() for mutation in mutations_list],
+                app_profile_id=self.app_profile_id,
+                **self._request_path,
+            ),
+            timeout=attempt_timeout,
+            retry=None,
+        )
+        return await CrossSync.retry_target(
+            target,
+            predicate,
+            sleep_generator,
+            operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+
+    @CrossSync.convert
+    async def bulk_mutate_rows(
+        self,
+        mutation_entries: list[RowMutationEntry],
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+    ):
+        """
+        Applies mutations for multiple rows in a single batched request.
+
+        Each individual RowMutationEntry is applied atomically, but separate entries
+        may be applied in arbitrary order (even for entries targeting the same row)
+        In total, the row_mutations can contain at most 100000 individual mutations
+        across all entries
+
+        Idempotent entries (i.e., entries with mutations with explicit timestamps)
+        will be retried on failure. Non-idempotent entries will not, and will be
+        reported in a raised exception group
+
+        Args:
+            mutation_entries: the batches of mutations to apply
+                Each entry will be applied atomically, but entries will be applied
+                in arbitrary order
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_mutate_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_mutate_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors + Raises: + MutationsExceptionGroup: if one or more mutations fails + Contains details about any failed entries in .exceptions + ValueError: if invalid arguments are provided + """ + operation_timeout, attempt_timeout = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + retryable_excs = _get_retryable_errors(retryable_errors, self) + + operation = CrossSync._MutateRowsOperation( + self.client._gapic_client, + self, + mutation_entries, + operation_timeout, + attempt_timeout, + retryable_exceptions=retryable_excs, + ) + await operation.start() + + @CrossSync.convert + async def check_and_mutate_row( + self, + row_key: str | bytes, + predicate: RowFilter | None, + *, + true_case_mutations: Mutation | list[Mutation] | None = None, + false_case_mutations: Mutation | list[Mutation] | None = None, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> bool: + """ + Mutates a row atomically based on the output of a predicate filter + + Non-idempotent operation: will not be retried + + Args: + row_key: the key of the row to mutate + predicate: the filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, + either true_case_mutations or false_case_mutations will be executed. + If None, checks that the row contains any values at all. + true_case_mutations: + Changes to be atomically applied to the specified row if + predicate yields at least one cell when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + false_case_mutations is empty, and at most 100000. + false_case_mutations: + Changes to be atomically applied to the specified row if + predicate_filter does not yield any cells when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + `true_case_mutations` is empty, and at most 100000. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. 
Defaults to the Table's default_operation_timeout + Returns: + bool indicating whether the predicate was true or false + Raises: + google.api_core.exceptions.GoogleAPIError: exceptions from grpc call + """ + operation_timeout, _ = _get_timeouts(operation_timeout, None, self) + if true_case_mutations is not None and not isinstance( + true_case_mutations, list + ): + true_case_mutations = [true_case_mutations] + true_case_list = [m._to_pb() for m in true_case_mutations or []] + if false_case_mutations is not None and not isinstance( + false_case_mutations, list + ): + false_case_mutations = [false_case_mutations] + false_case_list = [m._to_pb() for m in false_case_mutations or []] + result = await self.client._gapic_client.check_and_mutate_row( + request=CheckAndMutateRowRequest( + true_mutations=true_case_list, + false_mutations=false_case_list, + predicate_filter=predicate._to_pb() if predicate is not None else None, + row_key=row_key.encode("utf-8") + if isinstance(row_key, str) + else row_key, + app_profile_id=self.app_profile_id, + **self._request_path, + ), + timeout=operation_timeout, + retry=None, + ) + return result.predicate_matched + + @CrossSync.convert + async def read_modify_write_row( + self, + row_key: str | bytes, + rules: ReadModifyWriteRule | list[ReadModifyWriteRule], + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> Row: + """ + Reads and modifies a row atomically according to input ReadModifyWriteRules, + and returns the contents of all modified cells + + The new value for the timestamp is the greater of the existing timestamp or + the current server time. + + Non-idempotent operation: will not be retried + + Args: + row_key: the key of the row to apply read/modify/write rules to + rules: A rule or set of rules to apply to the row. + Rules are applied in order, meaning that earlier rules will affect the + results of later ones. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. + Defaults to the Table's default_operation_timeout. + Returns: + Row: a Row containing cell data that was modified as part of the operation + Raises: + google.api_core.exceptions.GoogleAPIError: exceptions from grpc call + ValueError: if invalid arguments are provided + """ + operation_timeout, _ = _get_timeouts(operation_timeout, None, self) + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if rules is not None and not isinstance(rules, list): + rules = [rules] + if not rules: + raise ValueError("rules must contain at least one item") + result = await self.client._gapic_client.read_modify_write_row( + request=ReadModifyWriteRowRequest( + rules=[rule._to_pb() for rule in rules], + row_key=row_key.encode("utf-8") + if isinstance(row_key, str) + else row_key, + app_profile_id=self.app_profile_id, + **self._request_path, + ), + timeout=operation_timeout, + retry=None, + ) + # construct Row from result + return Row._from_pb(result.row) + + @CrossSync.convert + async def close(self): + """ + Called to close the Table instance and release any resources held by it. 
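+
+        Example (an illustrative sketch; explicit close as an alternative to the
+        async context manager)::
+
+            table = client.get_table("my-instance", "my-table")
+            try:
+                ...
+            finally:
+                await table.close()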
+ """ + self._metrics.close() + if self._register_instance_future: + self._register_instance_future.cancel() + self.client._remove_instance_registration( + self.instance_id, self.app_profile_id, id(self) + ) + + @CrossSync.convert(sync_name="__enter__") + async def __aenter__(self): + """ + Implement async context manager protocol + + Ensure registration task has time to run, so that + grpc channels will be warmed for the specified instance + """ + if self._register_instance_future: + await self._register_instance_future + return self + + @CrossSync.convert(sync_name="__exit__") + async def __aexit__(self, exc_type, exc_val, exc_tb): + """ + Implement async context manager protocol + + Unregister this instance with the client, so that + grpc channels will no longer be warmed + """ + await self.close() + + +@CrossSync.convert_class( + sync_name="Table", + add_mapping_for_name="Table", + replace_symbols={"_DataApiTargetAsync": "_DataApiTarget"}, +) +class TableAsync(_DataApiTargetAsync): + """ + Main Data API surface for interacting with a Bigtable table. + + Table object maintains table_id, and app_profile_id context, and passes them with + each call + """ + + @property + def _request_path(self) -> dict[str, str]: + return {"table_name": self.table_name} + + +@CrossSync.convert_class( + sync_name="AuthorizedView", + add_mapping_for_name="AuthorizedView", + replace_symbols={"_DataApiTargetAsync": "_DataApiTarget"}, +) +class AuthorizedViewAsync(_DataApiTargetAsync): + """ + Provides access to an authorized view of a table. + + An authorized view is a subset of a table that you configure to include specific table data. + Then you grant access to the authorized view separately from access to the table. + + AuthorizedView object maintains table_id, app_profile_id, and authorized_view_id context, + and passed them with each call + """ + + @CrossSync.convert( + docstring_format_vars={ + "LOOP_MESSAGE": ( + "Must be created within an async context (running event loop)", + "", + ), + "RAISE_NO_LOOP": ( + "RuntimeError: if called outside of an async context (no running event loop)", + "None", + ), + } + ) + def __init__( + self, + client, + instance_id, + table_id, + authorized_view_id, + app_profile_id: str | None = None, + **kwargs, + ): + """ + Initialize an AuthorizedView instance + + {LOOP_MESSAGE} + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + authorized_view_id: The id for the authorized view to use for requests + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. 
If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Raises: + {RAISE_NO_LOOP} + """ + super().__init__(client, instance_id, table_id, app_profile_id, **kwargs) + self.authorized_view_id = authorized_view_id + self.authorized_view_name: str = self.client._gapic_client.authorized_view_path( + self.client.project, instance_id, table_id, authorized_view_id + ) + + @property + def _request_path(self) -> dict[str, str]: + return {"authorized_view_name": self.authorized_view_name} diff --git a/google/cloud/bigtable/data/_async/metrics_interceptor.py b/google/cloud/bigtable/data/_async/metrics_interceptor.py new file mode 100644 index 000000000..249dcdcc9 --- /dev/null +++ b/google/cloud/bigtable/data/_async/metrics_interceptor.py @@ -0,0 +1,172 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +from typing import Sequence + +import time +from functools import wraps + +from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric +from google.cloud.bigtable.data._metrics.data_model import OperationState +from google.cloud.bigtable.data._metrics.data_model import OperationType + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if CrossSync.is_async: + from grpc.aio import UnaryUnaryClientInterceptor + from grpc.aio import UnaryStreamClientInterceptor + from grpc.aio import AioRpcError +else: + from grpc import UnaryUnaryClientInterceptor + from grpc import UnaryStreamClientInterceptor + + +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.metrics_interceptor" + + +def _with_active_operation(func): + """ + Decorator for interceptor methods to extract the active operation associated with the + in-scope contextvars, and pass it to the decorated function. 
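The `_request_path` properties above are the only per-target difference the RPC layer sees: each request merges in either `table_name` or `authorized_view_name` via the `**self._request_path` expansions in the methods earlier in this file. A stand-alone sketch of the pattern, using simplified stand-in classes rather than the library's actual implementation:

```python
# Simplified, illustrative stand-ins for the target classes above.
class Target:
    @property
    def _request_path(self) -> dict[str, str]:
        raise NotImplementedError

class Table(Target):
    def __init__(self, table_name: str):
        self.table_name = table_name

    @property
    def _request_path(self) -> dict[str, str]:
        return {"table_name": self.table_name}

class AuthorizedView(Target):
    def __init__(self, authorized_view_name: str):
        self.authorized_view_name = authorized_view_name

    @property
    def _request_path(self) -> dict[str, str]:
        return {"authorized_view_name": self.authorized_view_name}

def build_request(target: Target, row_key: bytes) -> dict:
    # mirrors the **self._request_path splat in the real request builders
    return {"row_key": row_key, **target._request_path}

print(build_request(Table("projects/p/instances/i/tables/t"), b"key"))
```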
+ """ + + @wraps(func) + def wrapper(self, continuation, client_call_details, request): + operation: ActiveOperationMetric | None = ActiveOperationMetric.from_context() + + if operation: + # start a new attempt if not started + if ( + operation.state == OperationState.CREATED + or operation.state == OperationState.BETWEEN_ATTEMPTS + ): + operation.start_attempt() + # wrap continuation in logic to process the operation + return func(self, operation, continuation, client_call_details, request) + else: + # if operation not found, return unwrapped continuation + return continuation(client_call_details, request) + + return wrapper + + +@CrossSync.convert +async def _get_metadata(source) -> dict[str, str | bytes] | None: + """Helper to extract metadata from a call or RpcError""" + try: + metadata: Sequence[tuple[str, str | bytes]] + if CrossSync.is_async: + # grpc.aio returns metadata in Metadata objects + if isinstance(source, AioRpcError): + metadata = list(source.trailing_metadata()) + list( + source.initial_metadata() + ) + else: + metadata = list(await source.trailing_metadata()) + list( + await source.initial_metadata() + ) + else: + # sync grpc returns metadata as a sequence of tuples + metadata = source.trailing_metadata() + source.initial_metadata() + # convert metadata to dict format + return {k: v for (k, v) in metadata} + except Exception: + # ignore errors while fetching metadata + return None + + +@CrossSync.convert_class(sync_name="BigtableMetricsInterceptor") +class AsyncBigtableMetricsInterceptor( + UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor +): + """ + An async gRPC interceptor to add client metadata and print server metadata. + """ + + @CrossSync.convert + @_with_active_operation + async def intercept_unary_unary( + self, operation, continuation, client_call_details, request + ): + """ + Interceptor for unary rpcs: + - MutateRow + - CheckAndMutateRow + - ReadModifyWriteRow + """ + metadata = None + try: + call = await continuation(client_call_details, request) + metadata = await _get_metadata(call) + return call + except Exception as rpc_error: + metadata = await _get_metadata(rpc_error) + raise rpc_error + finally: + if metadata is not None: + operation.add_response_metadata(metadata) + + @CrossSync.convert + @_with_active_operation + async def intercept_unary_stream( + self, operation, continuation, client_call_details, request + ): + """ + Interceptor for streaming rpcs: + - ReadRows + - MutateRows + - SampleRowKeys + """ + try: + return self._streaming_generator_wrapper( + operation, await continuation(client_call_details, request) + ) + except Exception as rpc_error: + # handle errors while intializing stream + metadata = await _get_metadata(rpc_error) + if metadata is not None: + operation.add_response_metadata(metadata) + raise rpc_error + + @staticmethod + @CrossSync.convert + async def _streaming_generator_wrapper(operation, call): + """ + Wrapped generator to be returned by intercept_unary_stream. + """ + # only track has_first response for READ_ROWS + has_first_response = ( + operation.first_response_latency_ns is not None + or operation.op_type != OperationType.READ_ROWS + ) + encountered_exc = None + try: + async for response in call: + # record time to first response. 
Currently only used for READ_ROWS
+ if not has_first_response:
+ operation.first_response_latency_ns = (
+ time.monotonic_ns() - operation.start_time_ns
+ )
+ has_first_response = True
+ yield response
+ except Exception as e:
+ # handle errors while processing stream
+ encountered_exc = e
+ raise
+ finally:
+ if call is not None:
+ metadata = await _get_metadata(encountered_exc or call)
+ if metadata is not None:
+ operation.add_response_metadata(metadata)
diff --git a/google/cloud/bigtable/data/_async/mutations_batcher.py b/google/cloud/bigtable/data/_async/mutations_batcher.py
new file mode 100644
index 000000000..a8e99ea9e
--- /dev/null
+++ b/google/cloud/bigtable/data/_async/mutations_batcher.py
@@ -0,0 +1,536 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import Sequence, TYPE_CHECKING, cast
+import atexit
+import warnings
+from collections import deque
+import concurrent.futures
+
+from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+from google.cloud.bigtable.data.exceptions import FailedMutationEntryError
+from google.cloud.bigtable.data._helpers import _get_retryable_errors
+from google.cloud.bigtable.data._helpers import _get_timeouts
+from google.cloud.bigtable.data._helpers import TABLE_DEFAULT
+
+from google.cloud.bigtable.data.mutations import (
+ _MUTATE_ROWS_REQUEST_MUTATION_LIMIT,
+)
+from google.cloud.bigtable.data.mutations import Mutation
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+ from google.cloud.bigtable.data.mutations import RowMutationEntry
+
+ if CrossSync.is_async:
+ from google.cloud.bigtable.data._async.client import (
+ _DataApiTargetAsync as TargetType,
+ )
+ else:
+ from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget as TargetType # type: ignore
+
+__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.mutations_batcher"
+
+# used to make more readable default values
+_MB_SIZE = 1024 * 1024
+
+
+@CrossSync.convert_class(sync_name="_FlowControl", add_mapping_for_name="_FlowControl")
+class _FlowControlAsync:
+ """
+ Manages flow control for batched mutations. Mutations are registered against
+ the FlowControl object before being sent, which will block if size or count
+ limits have reached capacity. As mutations complete, they are removed from
+ the FlowControl object, which will notify any blocked requests that there
+ is additional capacity.
+
+ Flow limits are not hard limits. If a single mutation exceeds the configured
+ limits, it will be allowed as a single batch when the capacity is available.
+
+ Args:
+ max_mutation_count: maximum number of mutations to send in a single rpc.
+ This corresponds to individual mutations in a single RowMutationEntry.
+ max_mutation_bytes: maximum number of bytes to send in a single rpc.
+ Raises: + ValueError: if max_mutation_count or max_mutation_bytes is less than 0 + """ + + def __init__( + self, + max_mutation_count: int, + max_mutation_bytes: int, + ): + self._max_mutation_count = max_mutation_count + self._max_mutation_bytes = max_mutation_bytes + if self._max_mutation_count < 1: + raise ValueError("max_mutation_count must be greater than 0") + if self._max_mutation_bytes < 1: + raise ValueError("max_mutation_bytes must be greater than 0") + self._capacity_condition = CrossSync.Condition() + self._in_flight_mutation_count = 0 + self._in_flight_mutation_bytes = 0 + + def _has_capacity(self, additional_count: int, additional_size: int) -> bool: + """ + Checks if there is capacity to send a new entry with the given size and count + + FlowControl limits are not hard limits. If a single mutation exceeds + the configured flow limits, it will be sent in a single batch when + previous batches have completed. + + Args: + additional_count: number of mutations in the pending entry + additional_size: size of the pending entry + Returns: + bool: True if there is capacity to send the pending entry, False otherwise + """ + # adjust limits to allow overly large mutations + acceptable_size = max(self._max_mutation_bytes, additional_size) + acceptable_count = max(self._max_mutation_count, additional_count) + # check if we have capacity for new mutation + new_size = self._in_flight_mutation_bytes + additional_size + new_count = self._in_flight_mutation_count + additional_count + return new_size <= acceptable_size and new_count <= acceptable_count + + @CrossSync.convert + async def remove_from_flow( + self, mutations: RowMutationEntry | list[RowMutationEntry] + ) -> None: + """ + Removes mutations from flow control. This method should be called once + for each mutation that was sent to add_to_flow, after the corresponding + operation is complete. + + Args: + mutations: mutation or list of mutations to remove from flow control + """ + if not isinstance(mutations, list): + mutations = [mutations] + total_count = sum(len(entry.mutations) for entry in mutations) + total_size = sum(entry.size() for entry in mutations) + self._in_flight_mutation_count -= total_count + self._in_flight_mutation_bytes -= total_size + # notify any blocked requests that there is additional capacity + async with self._capacity_condition: + self._capacity_condition.notify_all() + + @CrossSync.convert + async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]): + """ + Generator function that registers mutations with flow control. As mutations + are accepted into the flow control, they are yielded back to the caller, + to be sent in a batch. If the flow control is at capacity, the generator + will block until there is capacity available. + + Args: + mutations: list mutations to break up into batches + Yields: + list[RowMutationEntry]: + list of mutations that have reserved space in the flow control. + Each batch contains at least one mutation. 
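The capacity rule above relaxes each limit to whichever is larger, the configured maximum or the pending entry itself, so one oversized entry can still proceed once everything else drains. A self-contained sketch of that check (mirroring `_has_capacity` rather than importing it):

```python
def has_capacity(in_flight_count: int, in_flight_bytes: int,
                 max_count: int, max_bytes: int,
                 additional_count: int, additional_size: int) -> bool:
    # an oversized entry raises the effective limit to its own size,
    # so it is admitted once nothing else is in flight
    acceptable_size = max(max_bytes, additional_size)
    acceptable_count = max(max_count, additional_count)
    return (in_flight_bytes + additional_size <= acceptable_size
            and in_flight_count + additional_count <= acceptable_count)

# a 150-byte entry against a 100-byte limit: blocked while 10 bytes
# are still in flight, but admitted once the pipeline is empty
assert not has_capacity(1, 10, 100, 100, 1, 150)
assert has_capacity(0, 0, 100, 100, 1, 150)
```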
+ """ + if not isinstance(mutations, list): + mutations = [mutations] + start_idx = 0 + end_idx = 0 + while end_idx < len(mutations): + start_idx = end_idx + batch_mutation_count = 0 + # fill up batch until we hit capacity + async with self._capacity_condition: + while end_idx < len(mutations): + next_entry = mutations[end_idx] + next_size = next_entry.size() + next_count = len(next_entry.mutations) + if ( + self._has_capacity(next_count, next_size) + # make sure not to exceed per-request mutation count limits + and (batch_mutation_count + next_count) + <= _MUTATE_ROWS_REQUEST_MUTATION_LIMIT + ): + # room for new mutation; add to batch + end_idx += 1 + batch_mutation_count += next_count + self._in_flight_mutation_bytes += next_size + self._in_flight_mutation_count += next_count + elif start_idx != end_idx: + # we have at least one mutation in the batch, so send it + break + else: + # batch is empty. Block until we have capacity + await self._capacity_condition.wait_for( + lambda: self._has_capacity(next_count, next_size) + ) + yield mutations[start_idx:end_idx] + + +@CrossSync.convert_class(sync_name="MutationsBatcher") +class MutationsBatcherAsync: + """ + Allows users to send batches using context manager API. + + Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining + to use as few network requests as required + + Will automatically flush the batcher: + - every flush_interval seconds + - after queue size reaches flush_limit_mutation_count + - after queue reaches flush_limit_bytes + - when batcher is closed or destroyed + + Args: + table: table or autrhorized_view used to preform rpc calls + flush_interval: Automatically flush every flush_interval seconds. + If None, no time-based flushing is performed. + flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count + mutations are added across all entries. If None, this limit is ignored. + flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. + flow_control_max_mutation_count: Maximum number of inflight mutations. + flow_control_max_bytes: Maximum number of inflight bytes. + batch_operation_timeout: timeout for each mutate_rows operation, in seconds. + If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_operation_timeout. + batch_attempt_timeout: timeout for each individual request, in seconds. + If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_attempt_timeout. + If None, defaults to batch_operation_timeout. + batch_retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_mutate_rows_retryable_errors. 
+ """ + + def __init__( + self, + table: TargetType, + *, + flush_interval: float | None = 5, + flush_limit_mutation_count: int | None = 1000, + flush_limit_bytes: int = 20 * _MB_SIZE, + flow_control_max_mutation_count: int = 100_000, + flow_control_max_bytes: int = 100 * _MB_SIZE, + batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + ): + self._operation_timeout, self._attempt_timeout = _get_timeouts( + batch_operation_timeout, batch_attempt_timeout, table + ) + self._retryable_errors: list[type[Exception]] = _get_retryable_errors( + batch_retryable_errors, table + ) + + self._closed = CrossSync.Event() + self._target = table + self._staged_entries: list[RowMutationEntry] = [] + self._staged_count, self._staged_bytes = 0, 0 + self._flow_control = CrossSync._FlowControl( + flow_control_max_mutation_count, flow_control_max_bytes + ) + self._flush_limit_bytes = flush_limit_bytes + self._flush_limit_count = ( + flush_limit_mutation_count + if flush_limit_mutation_count is not None + else float("inf") + ) + # used by sync class to run mutate_rows operations + self._sync_rpc_executor = ( + concurrent.futures.ThreadPoolExecutor(max_workers=8) + if not CrossSync.is_async + else None + ) + # used by sync class to manage flush_internal tasks + self._sync_flush_executor = ( + concurrent.futures.ThreadPoolExecutor(max_workers=4) + if not CrossSync.is_async + else None + ) + self._flush_timer = CrossSync.create_task( + self._timer_routine, flush_interval, sync_executor=self._sync_flush_executor + ) + self._flush_jobs: set[CrossSync.Future[None]] = set() + # MutationExceptionGroup reports number of successful entries along with failures + self._entries_processed_since_last_raise: int = 0 + self._exceptions_since_last_raise: int = 0 + # keep track of the first and last _exception_list_limit exceptions + self._exception_list_limit: int = 10 + self._oldest_exceptions: list[Exception] = [] + self._newest_exceptions: deque[Exception] = deque( + maxlen=self._exception_list_limit + ) + # clean up on program exit + atexit.register(self._on_exit) + + @CrossSync.convert + async def _timer_routine(self, interval: float | None) -> None: + """ + Set up a background task to flush the batcher every interval seconds + + If interval is None, an empty future is returned + + Args: + flush_interval: Automatically flush every flush_interval seconds. + If None, no time-based flushing is performed. + """ + if not interval or interval <= 0: + return None + while not self._closed.is_set(): + # wait until interval has passed, or until closed + await CrossSync.event_wait( + self._closed, timeout=interval, async_break_early=False + ) + if not self._closed.is_set() and self._staged_entries: + self._schedule_flush() + + @CrossSync.convert + async def append(self, mutation_entry: RowMutationEntry): + """ + Add a new set of mutations to the internal queue + + Args: + mutation_entry: new entry to add to flush queue + Raises: + RuntimeError: if batcher is closed + ValueError: if an invalid mutation type is added + """ + # TODO: return a future to track completion of this entry + if self._closed.is_set(): + raise RuntimeError("Cannot append to closed MutationsBatcher") + if isinstance(cast(Mutation, mutation_entry), Mutation): + raise ValueError( + f"invalid mutation type: {type(mutation_entry).__name__}. 
Only RowMutationEntry objects are supported by batcher" + ) + self._staged_entries.append(mutation_entry) + # start a new flush task if limits exceeded + self._staged_count += len(mutation_entry.mutations) + self._staged_bytes += mutation_entry.size() + if ( + self._staged_count >= self._flush_limit_count + or self._staged_bytes >= self._flush_limit_bytes + ): + self._schedule_flush() + # yield to the event loop to allow flush to run + await CrossSync.yield_to_event_loop() + + def _schedule_flush(self) -> CrossSync.Future[None] | None: + """ + Update the flush task to include the latest staged entries + + Returns: + Future[None] | None: + future representing the background task, if started + """ + if self._staged_entries: + entries, self._staged_entries = self._staged_entries, [] + self._staged_count, self._staged_bytes = 0, 0 + new_task = CrossSync.create_task( + self._flush_internal, entries, sync_executor=self._sync_flush_executor + ) + if not new_task.done(): + self._flush_jobs.add(new_task) + new_task.add_done_callback(self._flush_jobs.remove) + return new_task + return None + + @CrossSync.convert + async def _flush_internal(self, new_entries: list[RowMutationEntry]): + """ + Flushes a set of mutations to the server, and updates internal state + + Args: + new_entries list of RowMutationEntry objects to flush + """ + # flush new entries + in_process_requests: list[CrossSync.Future[list[FailedMutationEntryError]]] = [] + async for batch in self._flow_control.add_to_flow(new_entries): + batch_task = CrossSync.create_task( + self._execute_mutate_rows, batch, sync_executor=self._sync_rpc_executor + ) + in_process_requests.append(batch_task) + # wait for all inflight requests to complete + found_exceptions = await self._wait_for_batch_results(*in_process_requests) + # update exception data to reflect any new errors + self._entries_processed_since_last_raise += len(new_entries) + self._add_exceptions(found_exceptions) + + @CrossSync.convert + async def _execute_mutate_rows( + self, batch: list[RowMutationEntry] + ) -> list[FailedMutationEntryError]: + """ + Helper to execute mutation operation on a batch + + Args: + batch: list of RowMutationEntry objects to send to server + timeout: timeout in seconds. Used as operation_timeout and attempt_timeout. + If not given, will use table defaults + Returns: + list[FailedMutationEntryError]: + list of FailedMutationEntryError objects for mutations that failed. + FailedMutationEntryError objects will not contain index information + """ + try: + operation = CrossSync._MutateRowsOperation( + self._target.client._gapic_client, + self._target, + batch, + operation_timeout=self._operation_timeout, + attempt_timeout=self._attempt_timeout, + retryable_exceptions=self._retryable_errors, + ) + await operation.start() + except MutationsExceptionGroup as e: + # strip index information from exceptions, since it is not useful in a batch context + for subexc in e.exceptions: + subexc.index = None + return list(e.exceptions) + finally: + # mark batch as complete in flow control + await self._flow_control.remove_from_flow(batch) + return [] + + def _add_exceptions(self, excs: list[Exception]): + """ + Add new list of exceptions to internal store. To avoid unbounded memory, + the batcher will store the first and last _exception_list_limit exceptions, + and discard any in between. 
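A stand-alone sketch of this first/last retention policy, using the same plain list plus bounded `deque` bookkeeping as the batcher:

```python
from collections import deque

LIMIT = 10
oldest: list[Exception] = []
newest: deque[Exception] = deque(maxlen=LIMIT)

def add_exceptions(excs: list[Exception]) -> None:
    # fill the "oldest" bucket first, up to the limit
    if excs and len(oldest) < LIMIT:
        take = LIMIT - len(oldest)
        oldest.extend(excs[:take])
        excs = excs[take:]
    if excs:
        # deque(maxlen=LIMIT) silently discards the middle of the stream
        newest.extend(excs[-LIMIT:])

add_exceptions([ValueError(str(i)) for i in range(25)])
assert [str(e) for e in oldest] == [str(i) for i in range(10)]
assert [str(e) for e in newest] == [str(i) for i in range(15, 25)]
```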
+ + Args: + excs: list of exceptions to add to the internal store + """ + self._exceptions_since_last_raise += len(excs) + if excs and len(self._oldest_exceptions) < self._exception_list_limit: + # populate oldest_exceptions with found_exceptions + addition_count = self._exception_list_limit - len(self._oldest_exceptions) + self._oldest_exceptions.extend(excs[:addition_count]) + excs = excs[addition_count:] + if excs: + # populate newest_exceptions with remaining found_exceptions + self._newest_exceptions.extend(excs[-self._exception_list_limit :]) + + def _raise_exceptions(self): + """ + Raise any unreported exceptions from background flush operations + + Raises: + MutationsExceptionGroup: exception group with all unreported exceptions + """ + if self._oldest_exceptions or self._newest_exceptions: + oldest, self._oldest_exceptions = self._oldest_exceptions, [] + newest = list(self._newest_exceptions) + self._newest_exceptions.clear() + entry_count, self._entries_processed_since_last_raise = ( + self._entries_processed_since_last_raise, + 0, + ) + exc_count, self._exceptions_since_last_raise = ( + self._exceptions_since_last_raise, + 0, + ) + raise MutationsExceptionGroup.from_truncated_lists( + first_list=oldest, + last_list=newest, + total_excs=exc_count, + entry_count=entry_count, + ) + + @CrossSync.convert(sync_name="__enter__") + async def __aenter__(self): + """Allow use of context manager API""" + return self + + @CrossSync.convert(sync_name="__exit__") + async def __aexit__(self, exc_type, exc, tb): + """ + Allow use of context manager API. + + Flushes the batcher and cleans up resources. + """ + await self.close() + + @property + def closed(self) -> bool: + """ + Returns: + - True if the batcher is closed, False otherwise + """ + return self._closed.is_set() + + @CrossSync.convert + async def close(self): + """ + Flush queue and clean up resources + """ + self._closed.set() + self._flush_timer.cancel() + self._schedule_flush() + # shut down executors + if self._sync_flush_executor: + with self._sync_flush_executor: + self._sync_flush_executor.shutdown(wait=True) + if self._sync_rpc_executor: + with self._sync_rpc_executor: + self._sync_rpc_executor.shutdown(wait=True) + await CrossSync.wait([*self._flush_jobs, self._flush_timer]) + atexit.unregister(self._on_exit) + # raise unreported exceptions + self._raise_exceptions() + + def _on_exit(self): + """ + Called when program is exited. Raises warning if unflushed mutations remain + """ + if not self._closed.is_set() and self._staged_entries: + warnings.warn( + f"MutationsBatcher for target {self._target!r} was not closed. " + f"{len(self._staged_entries)} Unflushed mutations will not be sent to the server." + ) + + @staticmethod + @CrossSync.convert + async def _wait_for_batch_results( + *tasks: CrossSync.Future[list[FailedMutationEntryError]] + | CrossSync.Future[None], + ) -> list[Exception]: + """ + Takes in a list of futures representing _execute_mutate_rows tasks, + waits for them to complete, and returns a list of errors encountered. + + Args: + *tasks: futures representing _execute_mutate_rows or _flush_internal tasks + Returns: + list[Exception]: + list of Exceptions encountered by any of the tasks. Errors are expected + to be FailedMutationEntryError, representing a failed mutation operation. + If a task fails with a different exception, it will be included in the + output list. Successful tasks will not be represented in the output list. 
+ """ + if not tasks: + return [] + exceptions: list[Exception] = [] + for task in tasks: + if CrossSync.is_async: + # futures don't need to be awaited in sync mode + await task + try: + exc_list = task.result() + if exc_list: + # expect a list of FailedMutationEntryError objects + for exc in exc_list: + # strip index information + exc.index = None + exceptions.extend(exc_list) + except Exception as e: + exceptions.append(e) + return exceptions diff --git a/google/cloud/bigtable/data/_cross_sync/__init__.py b/google/cloud/bigtable/data/_cross_sync/__init__.py new file mode 100644 index 000000000..77a9ddae9 --- /dev/null +++ b/google/cloud/bigtable/data/_cross_sync/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .cross_sync import CrossSync + + +__all__ = [ + "CrossSync", +] diff --git a/google/cloud/bigtable/data/_cross_sync/_decorators.py b/google/cloud/bigtable/data/_cross_sync/_decorators.py new file mode 100644 index 000000000..a0dd140dd --- /dev/null +++ b/google/cloud/bigtable/data/_cross_sync/_decorators.py @@ -0,0 +1,448 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Contains a set of AstDecorator classes, which define the behavior of CrossSync decorators. +Each AstDecorator class is used through @CrossSync. +""" +from __future__ import annotations +from typing import TYPE_CHECKING, Iterable + +if TYPE_CHECKING: + import ast + from typing import Callable, Any + + +class AstDecorator: + """ + Helper class for CrossSync decorators used for guiding ast transformations. + + AstDecorators are accessed in two ways: + 1. The decorations are used directly as method decorations in the async client, + wrapping existing classes and methods + 2. The decorations are read back when processing the AST transformations when + generating sync code. + + This class allows the same decorator to be used in both contexts. + + Typically, AstDecorators act as a no-op in async code, and the arguments simply + provide configuration guidance for the sync code generation. + """ + + @classmethod + def decorator(cls, *args, **kwargs) -> Callable[..., Any]: + """ + Provides a callable that can be used as a decorator function in async code + + AstDecorator.decorate is called by CrossSync when attaching decorators to + the CrossSync class. 
+ + This method creates a new instance of the class, using the arguments provided + to the decorator, and defers to the async_decorator method of the instance + to build the wrapper function. + + Arguments: + *args: arguments to the decorator + **kwargs: keyword arguments to the decorator + """ + # decorators with no arguments will provide the function to be wrapped + # as the first argument. Pull it out if it exists + func = None + if len(args) == 1 and callable(args[0]): + func = args[0] + args = args[1:] + # create new AstDecorator instance from given decorator arguments + new_instance = cls(*args, **kwargs) + # build wrapper + wrapper = new_instance.async_decorator() + if wrapper is None: + # if no wrapper, return no-op decorator + return func or (lambda f: f) + elif func: + # if we can, return single wrapped function + return wrapper(func) + else: + # otherwise, return decorator function + return wrapper + + def async_decorator(self) -> Callable[..., Any] | None: + """ + Decorator to apply the async_impl decorator to the wrapped function + + Default implementation is a no-op + """ + return None + + def sync_ast_transform( + self, wrapped_node: ast.AST, transformers_globals: dict[str, Any] + ) -> ast.AST | None: + """ + When this decorator is encountered in the ast during sync generation, this method is called + to transform the wrapped node. + + If None is returned, the node will be dropped from the output file. + + Args: + wrapped_node: ast node representing the wrapped function or class that is being wrapped + transformers_globals: the set of globals() from the transformers module. This is used to access + ast transformer classes that live outside the main codebase + Returns: + transformed ast node, or None if the node should be dropped + """ + return wrapped_node + + @classmethod + def get_for_node(cls, node: ast.Call | ast.Attribute | ast.Name) -> "AstDecorator": + """ + Build an AstDecorator instance from an ast decorator node + + The right subclass is found by comparing the string representation of the + decorator name to the class name. (Both names are converted to lowercase and + underscores are removed for comparison). If a matching subclass is found, + a new instance is created with the provided arguments. + + Args: + node: ast.Call node representing the decorator + Returns: + AstDecorator instance corresponding to the decorator + Raises: + ValueError: if the decorator cannot be parsed + """ + import ast + + # expect decorators in format @CrossSync. + # (i.e. 
should be an ast.Call or an ast.Attribute) + root_attr = node.func if isinstance(node, ast.Call) else node + if not isinstance(root_attr, ast.Attribute): + raise ValueError("Unexpected decorator format") + # extract the module and decorator names + if "CrossSync" in ast.dump(root_attr): + decorator_name = root_attr.attr + got_kwargs: dict[str, Any] = ( + {str(kw.arg): cls._convert_ast_to_py(kw.value) for kw in node.keywords} + if hasattr(node, "keywords") + else {} + ) + got_args = ( + [cls._convert_ast_to_py(arg) for arg in node.args] + if hasattr(node, "args") + else [] + ) + # convert to standardized representation + formatted_name = decorator_name.replace("_", "").lower() + for subclass in cls.get_subclasses(): + if subclass.__name__.lower() == formatted_name: + return subclass(*got_args, **got_kwargs) + raise ValueError(f"Unknown decorator encountered: {decorator_name}") + else: + raise ValueError("Not a CrossSync decorator") + + @classmethod + def get_subclasses(cls) -> Iterable[type["AstDecorator"]]: + """ + Get all subclasses of AstDecorator + + Returns: + list of all subclasses of AstDecorator + """ + for subclass in cls.__subclasses__(): + yield from subclass.get_subclasses() + yield subclass + + @classmethod + def _convert_ast_to_py(cls, ast_node: ast.expr | None) -> Any: + """ + Helper to convert ast primitives to python primitives. Used when unwrapping arguments + """ + import ast + + if ast_node is None: + return None + if isinstance(ast_node, ast.Constant): + return ast_node.value + if isinstance(ast_node, ast.List): + return [cls._convert_ast_to_py(node) for node in ast_node.elts] + if isinstance(ast_node, ast.Tuple): + return tuple(cls._convert_ast_to_py(node) for node in ast_node.elts) + if isinstance(ast_node, ast.Dict): + return { + cls._convert_ast_to_py(k): cls._convert_ast_to_py(v) + for k, v in zip(ast_node.keys, ast_node.values) + } + # unsupported node type + return ast_node + + +class ConvertClass(AstDecorator): + """ + Class decorator for guiding generation of sync classes + + Args: + sync_name: use a new name for the sync class + replace_symbols: a dict of symbols and replacements to use when generating sync class + docstring_format_vars: a dict of variables to replace in the docstring + rm_aio: if True, automatically strip all asyncio keywords from method. If false, + only keywords wrapped in CrossSync.rm_aio() calls to be removed. + add_mapping_for_name: when given, will add a new attribute to CrossSync, + so the original class and its sync version can be accessed from CrossSync. 
+ """ + + def __init__( + self, + sync_name: str | None = None, + *, + replace_symbols: dict[str, str] | None = None, + docstring_format_vars: dict[str, tuple[str | None, str | None]] | None = None, + rm_aio: bool = False, + add_mapping_for_name: str | None = None, + ): + self.sync_name = sync_name + self.replace_symbols = replace_symbols + docstring_format_vars = docstring_format_vars or {} + self.async_docstring_format_vars = { + k: v[0] or "" for k, v in docstring_format_vars.items() + } + self.sync_docstring_format_vars = { + k: v[1] or "" for k, v in docstring_format_vars.items() + } + self.rm_aio = rm_aio + self.add_mapping_for_name = add_mapping_for_name + + def async_decorator(self): + """ + Use async decorator as a hook to update CrossSync mappings + """ + from .cross_sync import CrossSync + + if not self.add_mapping_for_name and not self.async_docstring_format_vars: + # return None if no changes needed + return None + + new_mapping = self.add_mapping_for_name + + def decorator(cls): + if new_mapping: + CrossSync.add_mapping(new_mapping, cls) + if self.async_docstring_format_vars: + cls.__doc__ = cls.__doc__.format(**self.async_docstring_format_vars) + return cls + + return decorator + + def sync_ast_transform(self, wrapped_node, transformers_globals): + """ + Transform async class into sync copy + """ + import ast + import copy + + # copy wrapped node + wrapped_node = copy.deepcopy(wrapped_node) + # update name + if self.sync_name: + wrapped_node.name = self.sync_name + # strip CrossSync decorators + if hasattr(wrapped_node, "decorator_list"): + wrapped_node.decorator_list = [ + d for d in wrapped_node.decorator_list if "CrossSync" not in ast.dump(d) + ] + else: + wrapped_node.decorator_list = [] + # strip async keywords if specified + if self.rm_aio: + wrapped_node = transformers_globals["AsyncToSync"]().visit(wrapped_node) + # add mapping decorator if needed + if self.add_mapping_for_name: + wrapped_node.decorator_list.append( + ast.Call( + func=ast.Attribute( + value=ast.Name(id="CrossSync", ctx=ast.Load()), + attr="add_mapping_decorator", + ctx=ast.Load(), + ), + args=[ + ast.Constant(value=self.add_mapping_for_name), + ], + keywords=[], + ) + ) + # replace symbols if specified + if self.replace_symbols: + wrapped_node = transformers_globals["SymbolReplacer"]( + self.replace_symbols + ).visit(wrapped_node) + # update docstring if specified + if self.sync_docstring_format_vars: + docstring = ast.get_docstring(wrapped_node) + if docstring: + wrapped_node.body[0].value = ast.Constant( + value=docstring.format(**self.sync_docstring_format_vars) + ) + return wrapped_node + + +class Convert(ConvertClass): + """ + Method decorator to mark async methods to be converted to sync methods + + Args: + sync_name: use a new name for the sync method + replace_symbols: a dict of symbols and replacements to use when generating sync method + docstring_format_vars: a dict of variables to replace in the docstring + rm_aio: if True, automatically strip all asyncio keywords from method. If False, + only the signature `async def` is stripped. Other keywords must be wrapped in + CrossSync.rm_aio() calls to be removed. 
+ """ + + def __init__( + self, + sync_name: str | None = None, + *, + replace_symbols: dict[str, str] | None = None, + docstring_format_vars: dict[str, tuple[str | None, str | None]] | None = None, + rm_aio: bool = True, + ): + super().__init__( + sync_name=sync_name, + replace_symbols=replace_symbols, + docstring_format_vars=docstring_format_vars, + rm_aio=rm_aio, + add_mapping_for_name=None, + ) + + def sync_ast_transform(self, wrapped_node, transformers_globals): + """ + Transform async method into sync + """ + import ast + + # replace async function with sync function + converted = ast.copy_location( + ast.FunctionDef( + wrapped_node.name, + wrapped_node.args, + wrapped_node.body, + wrapped_node.decorator_list + if hasattr(wrapped_node, "decorator_list") + else [], + wrapped_node.returns if hasattr(wrapped_node, "returns") else None, + ), + wrapped_node, + ) + # transform based on arguments + return super().sync_ast_transform(converted, transformers_globals) + + +class Drop(AstDecorator): + """ + Method decorator to drop methods or classes from the sync output + """ + + def sync_ast_transform(self, wrapped_node, transformers_globals): + """ + Drop from sync output + """ + return None + + +class Pytest(AstDecorator): + """ + Used in place of pytest.mark.asyncio to mark tests + + When generating sync version, also runs rm_aio to remove async keywords from + entire test function + + Args: + rm_aio: if True, automatically strip all asyncio keywords from test code. + Defaults to True, to simplify test code generation. + """ + + def __init__(self, rm_aio=True): + self.rm_aio = rm_aio + + def async_decorator(self): + import pytest + + return pytest.mark.asyncio + + def sync_ast_transform(self, wrapped_node, transformers_globals): + """ + convert async to sync + """ + import ast + + # always convert method to sync + converted = ast.copy_location( + ast.FunctionDef( + wrapped_node.name, + wrapped_node.args, + wrapped_node.body, + wrapped_node.decorator_list + if hasattr(wrapped_node, "decorator_list") + else [], + wrapped_node.returns if hasattr(wrapped_node, "returns") else None, + ), + wrapped_node, + ) + # convert entire body to sync if rm_aio is set + if self.rm_aio: + converted = transformers_globals["AsyncToSync"]().visit(converted) + return converted + + +class PytestFixture(AstDecorator): + """ + Used in place of pytest.fixture or pytest.mark.asyncio to mark fixtures + + Args: + *args: all arguments to pass to pytest.fixture + **kwargs: all keyword arguments to pass to pytest.fixture + """ + + def __init__(self, *args, **kwargs): + self._args = args + self._kwargs = kwargs + + def async_decorator(self): + import pytest_asyncio # type: ignore + + return lambda f: pytest_asyncio.fixture(*self._args, **self._kwargs)(f) + + def sync_ast_transform(self, wrapped_node, transformers_globals): + import ast + import copy + + arg_nodes = [ + a if isinstance(a, ast.expr) else ast.Constant(value=a) for a in self._args + ] + kwarg_nodes = [] + for k, v in self._kwargs.items(): + if not isinstance(v, ast.expr): + v = ast.Constant(value=v) + kwarg_nodes.append(ast.keyword(arg=k, value=v)) + + new_node = copy.deepcopy(wrapped_node) + if not hasattr(new_node, "decorator_list"): + new_node.decorator_list = [] + new_node.decorator_list.append( + ast.Call( + func=ast.Attribute( + value=ast.Name(id="pytest", ctx=ast.Load()), + attr="fixture", + ctx=ast.Load(), + ), + args=arg_nodes, + keywords=kwarg_nodes, + ) + ) + return new_node diff --git a/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py 
b/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py
new file mode 100644
index 000000000..5312708cc
--- /dev/null
+++ b/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py
@@ -0,0 +1,64 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+from typing import Any
+
+
+class MappingMeta(type):
+ """
+ Metaclass to provide add_mapping functionality, allowing users to add
+ custom attributes to derived classes at runtime.
+
+ Using a metaclass allows us to share functionality between CrossSync
+ and CrossSync._Sync_Impl, and it works better with mypy checks than
+ monkeypatching
+ """
+
+ # list of attributes that can be added to the derived class at runtime
+ _runtime_replacements: dict[tuple[MappingMeta, str], Any] = {}
+
+ def add_mapping(cls: MappingMeta, name: str, value: Any):
+ """
+ Add a new attribute to the class, for replacing library-level symbols
+
+ Raises:
+ - AttributeError if the attribute already exists with a different value
+ """
+ key = (cls, name)
+ old_value = cls._runtime_replacements.get(key)
+ if old_value is None:
+ cls._runtime_replacements[key] = value
+ elif old_value != value:
+ raise AttributeError(f"Conflicting assignments for CrossSync.{name}")
+
+ def add_mapping_decorator(cls: MappingMeta, name: str):
+ """
+ Exposes add_mapping as a class decorator
+ """
+
+ def decorator(wrapped_cls):
+ cls.add_mapping(name, wrapped_cls)
+ return wrapped_cls
+
+ return decorator
+
+ def __getattr__(cls: MappingMeta, name: str):
+ """
+ Retrieve custom attributes
+ """
+ key = (cls, name)
+ found = cls._runtime_replacements.get(key)
+ if found is not None:
+ return found
+ raise AttributeError(f"CrossSync has no attribute {name}")
diff --git a/google/cloud/bigtable/data/_cross_sync/cross_sync.py b/google/cloud/bigtable/data/_cross_sync/cross_sync.py
new file mode 100644
index 000000000..1f1ee111a
--- /dev/null
+++ b/google/cloud/bigtable/data/_cross_sync/cross_sync.py
@@ -0,0 +1,334 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+CrossSync provides a toolset for sharing logic between async and sync codebases, including:
+- A set of decorators for annotating async classes and functions
+ (@CrossSync.convert_class, @CrossSync.convert, @CrossSync.drop, ...)
+- A set of wrappers to wrap common objects and types that have corresponding async and sync implementations
+ (CrossSync.Queue, CrossSync.Condition, CrossSync.Future, ...)
+- A set of function implementations for common async operations that can be used in both async and sync codebases
+ (CrossSync.gather_partials, CrossSync.wait, CrossSync.condition_wait, ...)
+- CrossSync.rm_aio(), which is used to annotate regions of the code containing async keywords to strip
+
+A separate module will use CrossSync annotations to generate a corresponding sync
+class based on a decorated async class.
+
+Usage Example:
+```python
+__CROSS_SYNC_OUTPUT__ = "path.to.sync_module"
+
+@CrossSync.convert_class(sync_name="MyClass")
+class MyClassAsync:
+ @CrossSync.convert
+ async def async_func(self, arg: int) -> int:
+ await CrossSync.sleep(1)
+ return arg
+```
+"""
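This metaclass is what backs `CrossSync.add_mapping`: unknown attribute lookups fall through `__getattr__` into the `_runtime_replacements` table. A minimal sketch of the mechanism in isolation (`Registry` and `MyQueue` are illustrative names):

```python
from google.cloud.bigtable.data._cross_sync._mapping_meta import MappingMeta

class Registry(metaclass=MappingMeta):
    """Illustrative stand-in for CrossSync."""

class MyQueue:
    pass

Registry.add_mapping("Queue", MyQueue)
assert Registry.Queue is MyQueue      # resolved via the metaclass __getattr__

# re-registering the same value is a no-op; a different value raises
Registry.add_mapping("Queue", MyQueue)
try:
    Registry.add_mapping("Queue", dict)
except AttributeError as e:
    # note the message hardcodes the CrossSync name in its f-string
    print(e)  # Conflicting assignments for CrossSync.Queue
```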
+
+from __future__ import annotations
+
+from typing import (
+ TypeVar,
+ Any,
+ Callable,
+ Coroutine,
+ Sequence,
+ Union,
+ AsyncIterable,
+ AsyncIterator,
+ AsyncGenerator,
+ TYPE_CHECKING,
+)
+import typing
+
+import asyncio
+import sys
+import concurrent.futures
+import google.api_core.retry as retries
+import queue
+import threading
+import time
+from ._decorators import (
+ ConvertClass,
+ Convert,
+ Drop,
+ Pytest,
+ PytestFixture,
+)
+from ._mapping_meta import MappingMeta
+
+if TYPE_CHECKING:
+ from typing_extensions import TypeAlias
+
+T = TypeVar("T")
+
+
+class CrossSync(metaclass=MappingMeta):
+ # support CrossSync.is_async to check if the current environment is async
+ is_async = True
+
+ # provide aliases for common async functions and types
+ sleep = asyncio.sleep
+ retry_target = retries.retry_target_async
+ retry_target_stream = retries.retry_target_stream_async
+ Retry = retries.AsyncRetry
+ Queue: TypeAlias = asyncio.Queue
+ Condition: TypeAlias = asyncio.Condition
+ Future: TypeAlias = asyncio.Future
+ Task: TypeAlias = asyncio.Task
+ Event: TypeAlias = asyncio.Event
+ Semaphore: TypeAlias = asyncio.Semaphore
+ StopIteration: TypeAlias = StopAsyncIteration
+ # provide aliases for common async type annotations
+ Awaitable: TypeAlias = typing.Awaitable
+ Iterable: TypeAlias = AsyncIterable
+ Iterator: TypeAlias = AsyncIterator
+ Generator: TypeAlias = AsyncGenerator
+
+ # decorators
+ convert_class = ConvertClass.decorator # decorate classes to convert
+ convert = Convert.decorator # decorate methods to convert from async to sync
+ drop = Drop.decorator # decorate methods to remove from sync version
+ pytest = Pytest.decorator # decorate test methods to run with pytest-asyncio
+ pytest_fixture = (
+ PytestFixture.decorator
+ ) # decorate test methods to run with pytest fixture
+
+ @classmethod
+ def next(cls, iterable):
+ return iterable.__anext__()
+
+ @classmethod
+ def Mock(cls, *args, **kwargs):
+ """
+ Alias for AsyncMock, importing at runtime to avoid hard dependency on mock
+ """
+ try:
+ from unittest.mock import AsyncMock # type: ignore
+ except ImportError: # pragma: NO COVER
+ from mock import AsyncMock # type: ignore
+ return AsyncMock(*args, **kwargs)
+
+ @staticmethod
+ async def gather_partials(
+ partial_list: Sequence[Callable[[], Awaitable[T]]],
+ return_exceptions: bool = False,
+ sync_executor: concurrent.futures.ThreadPoolExecutor | None = None,
+ ) -> list[T | BaseException]:
+ """
+ abstraction over asyncio.gather, but with a set of partial functions instead
+ of coroutines, to work with sync functions.
+ To use gather with a set of futures instead of partials, use CrossSync.wait
+
+ In the async version, the partials are expected to return an awaitable object. Partials
+ are unpacked and awaited in the gather call.
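Because the method takes zero-argument callables rather than live coroutines, the same call site can be generated for both variants: here the partials are awaited through `asyncio.gather`, while the sync implementation further below submits them to the required thread pool. A small usage sketch from async code, with a made-up work function:

```python
import asyncio
import functools
from google.cloud.bigtable.data._cross_sync import CrossSync

async def fetch(i: int) -> int:   # hypothetical work item
    await CrossSync.sleep(0.01)
    return i * 2

async def main():
    partials = [functools.partial(fetch, i) for i in range(5)]
    results = await CrossSync.gather_partials(partials, return_exceptions=True)
    print(results)  # [0, 2, 4, 6, 8]

asyncio.run(main())
```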
+ + Sync version implemented with threadpool executor + + Returns: + - a list of results (or exceptions, if return_exceptions=True) in the same order as partial_list + """ + if not partial_list: + return [] + awaitable_list = [partial() for partial in partial_list] + return await asyncio.gather( + *awaitable_list, return_exceptions=return_exceptions + ) + + @staticmethod + async def wait( + futures: Sequence[CrossSync.Future[T]], timeout: float | None = None + ) -> tuple[set[CrossSync.Future[T]], set[CrossSync.Future[T]]]: + """ + abstraction over asyncio.wait + + Return: + - a tuple of (done, pending) sets of futures + """ + if not futures: + return set(), set() + return await asyncio.wait(futures, timeout=timeout) + + @staticmethod + async def event_wait( + event: CrossSync.Event, + timeout: float | None = None, + async_break_early: bool = True, + ) -> None: + """ + abstraction over asyncio.Event.wait + + Args: + - event: event to wait for + - timeout: if set, will break out early after `timeout` seconds + - async_break_early: if False, the async version will wait for + the full timeout even if the event is set before the timeout. + This avoids creating a new background task + """ + if timeout is None: + await event.wait() + elif not async_break_early: + if not event.is_set(): + await asyncio.sleep(timeout) + else: + try: + await asyncio.wait_for(event.wait(), timeout=timeout) + except asyncio.TimeoutError: + pass + + @staticmethod + def create_task( + fn: Callable[..., Coroutine[Any, Any, T]], + *fn_args, + sync_executor: concurrent.futures.ThreadPoolExecutor | None = None, + task_name: str | None = None, + **fn_kwargs, + ) -> CrossSync.Task[T]: + """ + abstraction over asyncio.create_task. Sync version implemented with threadpool executor + + sync_executor: ThreadPoolExecutor to use for sync operations. 
Ignored in async version + """ + task: CrossSync.Task[T] = asyncio.create_task(fn(*fn_args, **fn_kwargs)) + if task_name and sys.version_info >= (3, 8): + task.set_name(task_name) + return task + + @staticmethod + async def yield_to_event_loop() -> None: + """ + Call asyncio.sleep(0) to yield to allow other tasks to run + """ + await asyncio.sleep(0) + + @staticmethod + def verify_async_event_loop() -> None: + """ + Raises RuntimeError if the event loop is not running + """ + asyncio.get_running_loop() + + @staticmethod + def rm_aio(statement: T) -> T: + """ + Used to annotate regions of the code containing async keywords to strip + + All async keywords inside an rm_aio call are removed, along with + `async with` and `async for` statements containing CrossSync.rm_aio() in the body + """ + return statement + + class _Sync_Impl(metaclass=MappingMeta): + """ + Provide sync versions of the async functions and types in CrossSync + """ + + is_async = False + + sleep = time.sleep + next = next + retry_target = retries.retry_target + retry_target_stream = retries.retry_target_stream + Retry = retries.Retry + Queue: TypeAlias = queue.Queue + Condition: TypeAlias = threading.Condition + Future: TypeAlias = concurrent.futures.Future + Task: TypeAlias = concurrent.futures.Future + Event: TypeAlias = threading.Event + Semaphore: TypeAlias = threading.Semaphore + StopIteration: TypeAlias = StopIteration + # type annotations + Awaitable: TypeAlias = Union[T] + Iterable: TypeAlias = typing.Iterable + Iterator: TypeAlias = typing.Iterator + Generator: TypeAlias = typing.Generator + + @classmethod + def Mock(cls, *args, **kwargs): + from unittest.mock import Mock + + return Mock(*args, **kwargs) + + @staticmethod + def event_wait( + event: CrossSync._Sync_Impl.Event, + timeout: float | None = None, + async_break_early: bool = True, + ) -> None: + event.wait(timeout=timeout) + + @staticmethod + def gather_partials( + partial_list: Sequence[Callable[[], T]], + return_exceptions: bool = False, + sync_executor: concurrent.futures.ThreadPoolExecutor | None = None, + ) -> list[T | BaseException]: + if not partial_list: + return [] + if not sync_executor: + raise ValueError("sync_executor is required for sync version") + futures_list = [sync_executor.submit(partial) for partial in partial_list] + results_list: list[T | BaseException] = [] + for future in futures_list: + found_exc = future.exception() + if found_exc is not None: + if return_exceptions: + results_list.append(found_exc) + else: + raise found_exc + else: + results_list.append(future.result()) + return results_list + + @staticmethod + def wait( + futures: Sequence[CrossSync._Sync_Impl.Future[T]], + timeout: float | None = None, + ) -> tuple[ + set[CrossSync._Sync_Impl.Future[T]], set[CrossSync._Sync_Impl.Future[T]] + ]: + if not futures: + return set(), set() + return concurrent.futures.wait(futures, timeout=timeout) + + @staticmethod + def create_task( + fn: Callable[..., T], + *fn_args, + sync_executor: concurrent.futures.ThreadPoolExecutor | None = None, + task_name: str | None = None, + **fn_kwargs, + ) -> CrossSync._Sync_Impl.Task[T]: + """ + abstraction over asyncio.create_task. Sync version implemented with threadpool executor + + sync_executor: ThreadPoolExecutor to use for sync operations. 
Ignored in async version
+ """
+ if not sync_executor:
+ raise ValueError("sync_executor is required for sync version")
+ return sync_executor.submit(fn, *fn_args, **fn_kwargs)
+
+ @staticmethod
+ def yield_to_event_loop() -> None:
+ """
+ No-op for sync version
+ """
+ pass
+
+ @staticmethod
+ def verify_async_event_loop() -> None:
+ """
+ No-op for sync version
+ """
+ pass
diff --git a/google/cloud/bigtable/data/_helpers.py b/google/cloud/bigtable/data/_helpers.py
new file mode 100644
index 000000000..e848ebc6f
--- /dev/null
+++ b/google/cloud/bigtable/data/_helpers.py
@@ -0,0 +1,309 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Helper functions used in various places in the library.
+"""
+from __future__ import annotations
+
+from typing import Sequence, List, Tuple, TYPE_CHECKING, Union
+import time
+import enum
+from collections import namedtuple
+from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+
+from google.api_core import exceptions as core_exceptions
+from google.api_core.retry import exponential_sleep_generator
+from google.api_core.retry import RetryFailureReason
+from google.cloud.bigtable.data.exceptions import RetryExceptionGroup
+
+if TYPE_CHECKING:
+ import grpc
+ from google.cloud.bigtable.data._async.client import _DataApiTargetAsync
+ from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget
+
+"""
+Helper functions used in various places in the library.
+"""
+
+# Type alias for the output of sample_keys
+RowKeySamples = List[Tuple[bytes, int]]
+
+# type alias for the output of query.shard()
+ShardedQuery = List[ReadRowsQuery]
+
+# used by read_rows_sharded to limit how many requests are attempted in parallel
+_CONCURRENCY_LIMIT = 10
+
+# used to identify an active bigtable resource that needs to be warmed through PingAndWarm
+# each instance/app_profile_id pair needs to be individually tracked
+_WarmedInstanceKey = namedtuple(
+ "_WarmedInstanceKey", ["instance_name", "app_profile_id"]
+)
+
+
+# enum used on method calls when table defaults should be used
+class TABLE_DEFAULT(enum.Enum):
+ # default for mutate_row, sample_row_keys, check_and_mutate_row, and read_modify_write_row
+ DEFAULT = "DEFAULT"
+ # default for read_rows, read_rows_stream, read_rows_sharded, row_exists, and read_row
+ READ_ROWS = "READ_ROWS_DEFAULT"
+ # default for bulk_mutate_rows and mutations_batcher
+ MUTATE_ROWS = "MUTATE_ROWS_DEFAULT"
+
+
+def _attempt_timeout_generator(
+ per_request_timeout: float | None, operation_timeout: float
+):
+ """
+ Generator that yields the timeout value for each attempt of a retry loop.
+
+ Will return per_request_timeout until the operation_timeout is approached,
+ at which point it will return the remaining time in the operation_timeout.
+
+ Args:
+ per_request_timeout: The timeout value to use for each request, in seconds.
+ If None, the operation_timeout will be used for each request.
+ operation_timeout: The timeout value to use for the entire operation, in seconds.
+ Yields: + float: The timeout value to use for the next request, in seconds + """ + per_request_timeout = ( + per_request_timeout if per_request_timeout is not None else operation_timeout + ) + deadline = operation_timeout + time.monotonic() + while True: + yield max(0, min(per_request_timeout, deadline - time.monotonic())) + + +def _retry_exception_factory( + exc_list: list[Exception], + reason: RetryFailureReason, + timeout_val: float | None, +) -> tuple[Exception, Exception | None]: + """ + Build retry error based on exceptions encountered during operation + + Args: + exc_list: list of exceptions encountered during operation + reason: the reason the operation failed (e.g. RetryFailureReason.TIMEOUT) + timeout_val: the operation timeout value in seconds, for constructing + the error message + Returns: + tuple[Exception, Exception|None]: + tuple of the exception to raise, and a cause exception if applicable + """ + if reason == RetryFailureReason.TIMEOUT: + timeout_val_str = f"of {timeout_val:0.1f}s " if timeout_val is not None else "" + # if failed due to timeout, raise deadline exceeded as primary exception + source_exc: Exception = core_exceptions.DeadlineExceeded( + f"operation_timeout{timeout_val_str} exceeded" + ) + elif exc_list: + # otherwise, raise non-retryable error as primary exception + source_exc = exc_list.pop() + else: + source_exc = RuntimeError("failed with unspecified exception") + # use the retry exception group as the cause of the exception + cause_exc: Exception | None = RetryExceptionGroup(exc_list) if exc_list else None + source_exc.__cause__ = cause_exc + return source_exc, cause_exc + + +def _get_timeouts( + operation: float | TABLE_DEFAULT, + attempt: float | None | TABLE_DEFAULT, + table: "_DataApiTargetAsync" | "_DataApiTarget", +) -> tuple[float, float]: + """ + Convert passed in timeout values to floats, using table defaults if necessary. + + attempt will use operation value if None, or if larger than operation. + + Will call _validate_timeouts on the outputs, and raise ValueError if the + resulting timeouts are invalid. + + Args: + operation: The timeout value to use for the entire operation, in seconds. + attempt: The timeout value to use for each attempt, in seconds. + table: The table to use for default values. + Returns: + tuple[float, float]: A tuple of (operation_timeout, attempt_timeout) + """ + # load table defaults if necessary + if operation == TABLE_DEFAULT.DEFAULT: + final_operation = table.default_operation_timeout + elif operation == TABLE_DEFAULT.READ_ROWS: + final_operation = table.default_read_rows_operation_timeout + elif operation == TABLE_DEFAULT.MUTATE_ROWS: + final_operation = table.default_mutate_rows_operation_timeout + else: + final_operation = operation + if attempt == TABLE_DEFAULT.DEFAULT: + attempt = table.default_attempt_timeout + elif attempt == TABLE_DEFAULT.READ_ROWS: + attempt = table.default_read_rows_attempt_timeout + elif attempt == TABLE_DEFAULT.MUTATE_ROWS: + attempt = table.default_mutate_rows_attempt_timeout + + return _align_timeouts(final_operation, attempt) + + +def _align_timeouts(operation: float, attempt: float | None) -> tuple[float, float]: + """ + Convert passed in timeout values to floats. + + attempt will use operation value if None, or if larger than operation. + + Will call _validate_timeouts on the outputs, and raise ValueError if the + resulting timeouts are invalid. + + Args: + operation: The timeout value to use for the entire operation, in seconds. + attempt: The timeout value to use for each attempt, in seconds.
+ Returns: + tuple[float, float]: A tuple of (operation_timeout, attempt_timeout) + """ + if attempt is None: + # no timeout specified, use operation timeout for both + final_attempt = operation + else: + # cap attempt timeout at operation timeout + final_attempt = min(attempt, operation) if operation else attempt + + _validate_timeouts(operation, final_attempt, allow_none=False) + return operation, final_attempt + + +def _validate_timeouts( + operation_timeout: float, attempt_timeout: float | None, allow_none: bool = False +): + """ + Helper function that will verify that timeout values are valid, and raise + an exception if they are not. + + Args: + operation_timeout: The timeout value to use for the entire operation, in seconds. + attempt_timeout: The timeout value to use for each attempt, in seconds. + allow_none: If True, attempt_timeout can be None. If False, None values will raise an exception. + Raises: + ValueError: if operation_timeout or attempt_timeout are invalid. + """ + if operation_timeout is None: + raise ValueError("operation_timeout cannot be None") + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if not allow_none and attempt_timeout is None: + raise ValueError("attempt_timeout must not be None") + elif attempt_timeout is not None: + if attempt_timeout <= 0: + raise ValueError("attempt_timeout must be greater than 0") + + +def _get_error_type( + call_code: Union["grpc.StatusCode", int, type[Exception]] +) -> type[Exception]: + """Helper function for ensuring the object is an exception type. + If it is not, the proper GoogleAPICallError type is inferred from the status + code. + + Args: + - call_code: Exception type or gRPC status code. + """ + if isinstance(call_code, type): + return call_code + else: + return type(core_exceptions.from_grpc_status(call_code, "")) + + +def _get_retryable_errors( + call_codes: Sequence["grpc.StatusCode" | int | type[Exception]] | TABLE_DEFAULT, + table: "_DataApiTargetAsync" | "_DataApiTarget", +) -> list[type[Exception]]: + """ + Convert passed in retryable error codes to a list of exception types. + + Args: + call_codes: The error codes to convert. Can be a list of grpc.StatusCode values, + int values, or Exception types, or a TABLE_DEFAULT value. + table: The table to use for default values. + Returns: + list[type[Exception]]: A list of exception types to retry on. + """ + # load table defaults if necessary + if call_codes == TABLE_DEFAULT.DEFAULT: + call_codes = table.default_retryable_errors + elif call_codes == TABLE_DEFAULT.READ_ROWS: + call_codes = table.default_read_rows_retryable_errors + elif call_codes == TABLE_DEFAULT.MUTATE_ROWS: + call_codes = table.default_mutate_rows_retryable_errors + + return [_get_error_type(e) for e in call_codes] + + +class TrackedBackoffGenerator: + """ + Generator class for exponential backoff sleep times. + This implementation builds on top of api_core.retries.exponential_sleep_generator, + adding the ability to retrieve previous values using get_attempt_backoff(idx). + This is used by the Metrics class to track the sleep times used for each attempt. + """ + + def __init__(self, initial=0.01, maximum=60, multiplier=2): + self.history = [] + self.subgenerator = exponential_sleep_generator( + initial=initial, maximum=maximum, multiplier=multiplier + ) + self._next_override: float | None = None + + def __iter__(self): + return self + + def set_next(self, next_value: float): + """ + Set the next backoff value, instead of generating one from subgenerator.
+ After the value is yielded, it will go back to using self.subgenerator. + + If set_next is called twice before next() is called, only the latest + value will be used and others discarded + + Args: + next_value: the upcoming value to yield when next() is called + Raises: + ValueError: if next_value is negative + """ + if next_value < 0: + raise ValueError("backoff value cannot be less than 0") + self._next_override = next_value + + def __next__(self) -> float: + if self._next_override is not None: + next_backoff = self._next_override + self._next_override = None + else: + next_backoff = next(self.subgenerator) + self.history.append(next_backoff) + return next_backoff + + def get_attempt_backoff(self, attempt_idx) -> float: + """ + Returns the backoff time for a specific attempt index, starting at 0. + + Args: + attempt_idx: the index of the attempt to return backoff for + Raises: + IndexError: if attempt_idx is negative, or not in history + """ + if attempt_idx < 0: + raise IndexError("received negative attempt number") + return self.history[attempt_idx] diff --git a/google/cloud/bigtable/data/_metrics/__init__.py b/google/cloud/bigtable/data/_metrics/__init__.py new file mode 100644 index 000000000..26cfc1326 --- /dev/null +++ b/google/cloud/bigtable/data/_metrics/__init__.py @@ -0,0 +1,35 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from google.cloud.bigtable.data._metrics.metrics_controller import ( + BigtableClientSideMetricsController, +) + +from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric +from google.cloud.bigtable.data._metrics.data_model import ActiveAttemptMetric +from google.cloud.bigtable.data._metrics.data_model import CompletedOperationMetric +from google.cloud.bigtable.data._metrics.data_model import CompletedAttemptMetric +from google.cloud.bigtable.data._metrics.data_model import OperationState +from google.cloud.bigtable.data._metrics.data_model import OperationType +from google.cloud.bigtable.data._metrics.tracked_retry import tracked_retry + +__all__ = ( + "BigtableClientSideMetricsController", + "OperationType", + "OperationState", + "ActiveOperationMetric", + "ActiveAttemptMetric", + "CompletedOperationMetric", + "CompletedAttemptMetric", + "tracked_retry", +) diff --git a/google/cloud/bigtable/data/_metrics/data_model.py b/google/cloud/bigtable/data/_metrics/data_model.py new file mode 100644 index 000000000..64dd63bfa --- /dev/null +++ b/google/cloud/bigtable/data/_metrics/data_model.py @@ -0,0 +1,469 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +from typing import ClassVar, Tuple, cast, TYPE_CHECKING + +import time +import re +import logging +import contextvars + +from enum import Enum +from functools import lru_cache +from dataclasses import dataclass +from dataclasses import field +from grpc import StatusCode +from grpc import RpcError +from grpc.aio import AioRpcError + +import google.cloud.bigtable.data.exceptions as bt_exceptions +from google.cloud.bigtable_v2.types.response_params import ResponseParams +from google.cloud.bigtable.data._helpers import TrackedBackoffGenerator +from google.protobuf.message import DecodeError + +if TYPE_CHECKING: + from google.cloud.bigtable.data._metrics.handlers._base import MetricsHandler + + +LOGGER = logging.getLogger(__name__) + +# default values for zone and cluster data, if not captured +DEFAULT_ZONE = "global" +DEFAULT_CLUSTER_ID = "" + +# keys for parsing metadata blobs +BIGTABLE_LOCATION_METADATA_KEY = "x-goog-ext-425905942-bin" +SERVER_TIMING_METADATA_KEY = "server-timing" +SERVER_TIMING_REGEX = re.compile(r".*gfet4t7;\s*dur=(\d+\.?\d*).*") + +INVALID_STATE_ERROR = "Invalid state for {}: {}" + + +class OperationType(Enum): + """Enum for the type of operation being performed.""" + + READ_ROWS = "ReadRows" + SAMPLE_ROW_KEYS = "SampleRowKeys" + BULK_MUTATE_ROWS = "MutateRows" + MUTATE_ROW = "MutateRow" + CHECK_AND_MUTATE = "CheckAndMutateRow" + READ_MODIFY_WRITE = "ReadModifyWriteRow" + + +class OperationState(Enum): + """Enum for the state of the active operation. + + ┌───────────┐ + │ CREATED │────────┐ + └─────┬─────┘ │ + │ │ + ▼ │ + ┌▶ ACTIVE_ATTEMPT ───┐│ + │ │ ││ + │ ▼ ││ + └─ BETWEEN_ATTEMPTS ││ + │ ││ + ▼ ││ + ┌───────────┐ ││ + │ COMPLETED │ ◀─────┘│ + └───────────┘ ◀──────┘ + """ + + CREATED = 0 + ACTIVE_ATTEMPT = 1 + BETWEEN_ATTEMPTS = 2 + COMPLETED = 3 + + +@dataclass(frozen=True) +class CompletedAttemptMetric: + """ + An immutable dataclass representing the data associated with a + completed rpc attempt. + + Operation-level fields (eg. type, cluster, zone) are stored on the + corresponding CompletedOperationMetric or ActiveOperationMetric object. + """ + + duration_ns: int + end_status: StatusCode + gfe_latency_ns: int | None = None + application_blocking_time_ns: int = 0 + backoff_before_attempt_ns: int = 0 + + +@dataclass(frozen=True) +class CompletedOperationMetric: + """ + An immutable dataclass representing the data associated with a + completed rpc operation. + + Attempt-level fields (eg. duration, latencies, etc) are stored on the + corresponding CompletedAttemptMetric object. + """ + + op_type: OperationType + duration_ns: int + completed_attempts: list[CompletedAttemptMetric] + final_status: StatusCode + cluster_id: str + zone: str + is_streaming: bool + first_response_latency_ns: int | None = None + flow_throttling_time_ns: int = 0 + + +@dataclass +class ActiveAttemptMetric: + """ + A dataclass representing the data associated with an rpc attempt that is + currently in progress. Fields are mutable and may be optional. + """ + + # keep monotonic timestamps for active attempts + start_time_ns: int = field(default_factory=lambda: time.monotonic_ns()) + # the time taken by the backend, in nanoseconds.
Taken from response header + gfe_latency_ns: int | None = None + # time waiting on user to process the response, in nanoseconds + # currently only relevant for ReadRows + application_blocking_time_ns: int = 0 + # backoff time is added to application_blocking_time_ns + backoff_before_attempt_ns: int = 0 + + +@dataclass +class ActiveOperationMetric: + """ + A dataclass representing the data associated with an rpc operation that is + currently in progress. Fields are mutable and may be optional. + """ + + op_type: OperationType + state: OperationState = OperationState.CREATED + # create a default backoff generator, initialized with standard default backoff values + backoff_generator: TrackedBackoffGenerator = field( + default_factory=lambda: TrackedBackoffGenerator( + initial=0.01, maximum=60, multiplier=2 + ) + ) + # keep monotonic timestamps for active operations + start_time_ns: int = field(default_factory=lambda: time.monotonic_ns()) + active_attempt: ActiveAttemptMetric | None = None + cluster_id: str | None = None + zone: str | None = None + completed_attempts: list[CompletedAttemptMetric] = field(default_factory=list) + is_streaming: bool = False # only True for read_rows operations + handlers: list[MetricsHandler] = field(default_factory=list) + # the time it takes to receive the first response from the server, in nanoseconds + # attached by interceptor + # currently only tracked for ReadRows + first_response_latency_ns: int | None = None + # time waiting on flow control, in nanoseconds + flow_throttling_time_ns: int = 0 + + _active_operation_context: ClassVar[ + contextvars.ContextVar[ActiveOperationMetric] + ] = contextvars.ContextVar("active_operation_context") + + @classmethod + def from_context(cls) -> ActiveOperationMetric | None: + """Retrieves the active operation from the current execution context. + + Because execution within a context is sequential, this guarantees + retrieval of the single, unique operation, isolated from other + concurrent RPCs. + + Note: + This is intended to be called by gRPC interceptors at the start + of an RPC. + + Returns: + ActiveOperationMetric: The current active operation. + None: If no operation is set, or if the current operation is + already in the `COMPLETED` state. + """ + op = cls._active_operation_context.get(None) + if op and op.state == OperationState.COMPLETED: + return None + return op + + def __post_init__(self): + """ + Save new instances to contextvars on init + """ + self._active_operation_context.set(self) + + def start(self) -> None: + """ + Optionally called to mark the start of the operation. If not called, + the operation will be started at initialization. + + StartState: CREATED + EndState: CREATED + """ + if self.state != OperationState.CREATED: + return self._handle_error(INVALID_STATE_ERROR.format("start", self.state)) + self.start_time_ns = time.monotonic_ns() + # set as active operation in contextvars + self._active_operation_context.set(self) + + def start_attempt(self) -> ActiveAttemptMetric | None: + """ + Called to initiate a new attempt for the operation.
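+ + The backoff that preceded this attempt is read from the backoff_generator + history and recorded on the new attempt as backoff_before_attempt_ns.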
+ + StartState: CREATED | BETWEEN_ATTEMPTS + EndState: ACTIVE_ATTEMPT + """ + if ( + self.state != OperationState.BETWEEN_ATTEMPTS + and self.state != OperationState.CREATED + ): + return self._handle_error( + INVALID_STATE_ERROR.format("start_attempt", self.state) + ) + # set as active operation in contextvars + self._active_operation_context.set(self) + + try: + # find backoff value before this attempt + prev_attempt_idx = len(self.completed_attempts) - 1 + backoff = self.backoff_generator.get_attempt_backoff(prev_attempt_idx) + # generator will return the backoff time in seconds, so convert to nanoseconds + backoff_ns = int(backoff * 1e9) + except IndexError: + # backoff value not found + backoff_ns = 0 + + self.active_attempt = ActiveAttemptMetric(backoff_before_attempt_ns=backoff_ns) + self.state = OperationState.ACTIVE_ATTEMPT + return self.active_attempt + + def add_response_metadata(self, metadata: dict[str, bytes | str]) -> None: + """ + Attach trailing metadata to the active attempt. + + If not called, default values for the metadata will be used. + + StartState: ACTIVE_ATTEMPT + EndState: ACTIVE_ATTEMPT + + Args: + - metadata: the metadata as extracted from the grpc call + """ + if self.state != OperationState.ACTIVE_ATTEMPT: + return self._handle_error( + INVALID_STATE_ERROR.format("add_response_metadata", self.state) + ) + if self.cluster_id is None or self.zone is None: + # BIGTABLE_LOCATION_METADATA_KEY should give a binary-encoded ResponseParams proto + blob = cast(bytes, metadata.get(BIGTABLE_LOCATION_METADATA_KEY)) + if blob: + parse_result = self._parse_response_metadata_blob(blob) + if parse_result is not None: + cluster, zone = parse_result + if cluster: + self.cluster_id = cluster + if zone: + self.zone = zone + else: + self._handle_error( + f"Failed to decode {BIGTABLE_LOCATION_METADATA_KEY} metadata: {blob!r}" + ) + # SERVER_TIMING_METADATA_KEY should give a string with the server-latency headers + timing_header = cast(str, metadata.get(SERVER_TIMING_METADATA_KEY)) + if timing_header: + timing_data = SERVER_TIMING_REGEX.match(timing_header) + if timing_data and self.active_attempt: + gfe_latency_ms = float(timing_data.group(1)) + self.active_attempt.gfe_latency_ns = int(gfe_latency_ms * 1e6) + + @staticmethod + @lru_cache(maxsize=32) + def _parse_response_metadata_blob(blob: bytes) -> Tuple[str, str] | None: + """ + Parse the response metadata blob and return a tuple of cluster and zone. + + Function is cached to avoid parsing the same blob multiple times. + + Args: + - blob: the metadata blob as extracted from the grpc call + Returns: + - a tuple of cluster_id and zone, or None if parsing failed + """ + try: + proto = ResponseParams.pb().FromString(blob) + return proto.cluster_id, proto.zone_id + except (DecodeError, TypeError): + # failed to parse metadata + return None + + def end_attempt_with_status(self, status: StatusCode | BaseException) -> None: + """ + Called to mark the end of an attempt for the operation. + + Typically, this is used to mark a retryable error. If a retry will not + be attempted, `end_with_status` or `end_with_success` should be used + to finalize the operation along with the attempt. + + StartState: ACTIVE_ATTEMPT + EndState: BETWEEN_ATTEMPTS + + Args: + - status: The status of the attempt. 
+ """ + if self.state != OperationState.ACTIVE_ATTEMPT or self.active_attempt is None: + return self._handle_error( + INVALID_STATE_ERROR.format("end_attempt_with_status", self.state) + ) + if isinstance(status, BaseException): + status = self._exc_to_status(status) + duration_ns = self._ensure_positive( + time.monotonic_ns() - self.active_attempt.start_time_ns, "duration" + ) + complete_attempt = CompletedAttemptMetric( + duration_ns=duration_ns, + end_status=status, + gfe_latency_ns=self.active_attempt.gfe_latency_ns, + application_blocking_time_ns=self.active_attempt.application_blocking_time_ns, + backoff_before_attempt_ns=self.active_attempt.backoff_before_attempt_ns, + ) + self.completed_attempts.append(complete_attempt) + self.active_attempt = None + self.state = OperationState.BETWEEN_ATTEMPTS + for handler in self.handlers: + handler.on_attempt_complete(complete_attempt, self) + + def end_with_status(self, status: StatusCode | BaseException) -> None: + """ + Called to mark the end of the operation. If there is an active attempt, + end_attempt_with_status will be called with the same status. + + StartState: CREATED | ACTIVE_ATTEMPT | BETWEEN_ATTEMPTS + EndState: COMPLETED + + Causes on_operation_complete to be called for each registered handler. + + Args: + - status: The status of the operation. + """ + if self.state == OperationState.COMPLETED: + return self._handle_error( + INVALID_STATE_ERROR.format("end_with_status", self.state) + ) + final_status = ( + self._exc_to_status(status) if isinstance(status, BaseException) else status + ) + if self.state == OperationState.ACTIVE_ATTEMPT: + self.end_attempt_with_status(final_status) + duration_ns = self._ensure_positive( + time.monotonic_ns() - self.start_time_ns, "duration" + ) + finalized = CompletedOperationMetric( + op_type=self.op_type, + completed_attempts=self.completed_attempts, + duration_ns=duration_ns, + final_status=final_status, + cluster_id=self.cluster_id or DEFAULT_CLUSTER_ID, + zone=self.zone or DEFAULT_ZONE, + is_streaming=self.is_streaming, + first_response_latency_ns=self.first_response_latency_ns, + flow_throttling_time_ns=self.flow_throttling_time_ns, + ) + self.state = OperationState.COMPLETED + for handler in self.handlers: + handler.on_operation_complete(finalized) + + def end_with_success(self): + """ + Called to mark the end of the operation with a successful status. + + StartState: CREATED | ACTIVE_ATTEMPT | BETWEEN_ATTEMPTS + EndState: COMPLETED + + Causes on_operation_complete to be called for each registered handler. + """ + return self.end_with_status(StatusCode.OK) + + @staticmethod + def _exc_to_status(exc: BaseException) -> StatusCode: + """ + Extracts the grpc status code from an exception. + + Exception groups and wrappers will be parsed to find the underlying + grpc Exception. + + If the exception is not a grpc exception, will return StatusCode.UNKNOWN. + + Args: + - exc: The exception to extract the status code from.
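+ Returns: + - StatusCode: the extracted grpc status code, or StatusCode.UNKNOWN if + none could be found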
+ """ + if isinstance(exc, bt_exceptions._BigtableExceptionGroup): + exc = exc.exceptions[-1] + if hasattr(exc, "grpc_status_code") and exc.grpc_status_code is not None: + return exc.grpc_status_code + if ( + exc.__cause__ + and hasattr(exc.__cause__, "grpc_status_code") + and exc.__cause__.grpc_status_code is not None + ): + return exc.__cause__.grpc_status_code + if isinstance(exc, AioRpcError) or isinstance(exc, RpcError): + return exc.code() + return StatusCode.UNKNOWN + + @staticmethod + def _handle_error(message: str) -> None: + """ + Log metric system error messages + + Args: + - message: The message to include in the exception or warning. + """ + full_message = f"Error in Bigtable Metrics: {message}" + LOGGER.warning(full_message) + + def _ensure_positive(self, value: int, field_name: str) -> int: + """ + Helper to replace negative value with 0, and record an error + """ + if value < 0: + self._handle_error(f"received negative value for {field_name}: {value}") + return 0 + return value + + def __enter__(self): + """ + Implements the context manager protocol + + Using the operation's context manager provides assurances that the operation + is always closed when complete, with the proper status code automatically + detected when an exception is raised. + """ + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """ + Implements the context manager protocol + + The operation is automatically ended on exit, with the status determined + by the exception type and value. + + If the operation was already ended manually, do nothing. + """ + if not self.state == OperationState.COMPLETED: + if exc_val is None: + self.end_with_success() + else: + self.end_with_status(exc_val) diff --git a/google/cloud/bigtable/data/_metrics/handlers/_base.py b/google/cloud/bigtable/data/_metrics/handlers/_base.py new file mode 100644 index 000000000..884091fdd --- /dev/null +++ b/google/cloud/bigtable/data/_metrics/handlers/_base.py @@ -0,0 +1,38 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric +from google.cloud.bigtable.data._metrics.data_model import CompletedAttemptMetric +from google.cloud.bigtable.data._metrics.data_model import CompletedOperationMetric + + +class MetricsHandler: + """ + Base class for all metrics handlers. Metrics handlers will receive callbacks + when operations and attempts are completed, and can use this information to + update some external metrics system.
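+ + Example (an illustrative sketch; LoggingHandler is hypothetical, not part of the library): + + class LoggingHandler(MetricsHandler): + def on_operation_complete(self, op: CompletedOperationMetric) -> None: + print(f"{op.op_type.value} finished with status {op.final_status}")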
+ """ + + def __init__(self, **kwargs): + pass + + def on_operation_complete(self, op: CompletedOperationMetric) -> None: + pass + + def on_attempt_complete( + self, attempt: CompletedAttemptMetric, op: ActiveOperationMetric + ) -> None: + pass + + def close(self): + pass diff --git a/google/cloud/bigtable/data/_metrics/metrics_controller.py b/google/cloud/bigtable/data/_metrics/metrics_controller.py new file mode 100644 index 000000000..e9815f201 --- /dev/null +++ b/google/cloud/bigtable/data/_metrics/metrics_controller.py @@ -0,0 +1,63 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric +from google.cloud.bigtable.data._metrics.handlers._base import MetricsHandler +from google.cloud.bigtable.data._metrics.data_model import OperationType + + +class BigtableClientSideMetricsController: + """ + BigtableClientSideMetricsController is responsible for managing the + lifecycle of the metrics system. The Bigtable client library will + use this class to create new operations. Each operation will be + registered with the handlers associated with this controller. + """ + + def __init__( + self, + handlers: list[MetricsHandler] | None = None, + ): + """ + Initializes the metrics controller. + + Args: + - handlers: A list of MetricsHandler objects to subscribe to metrics events. + """ + self.handlers: list[MetricsHandler] = handlers or [] + + def add_handler(self, handler: MetricsHandler) -> None: + """ + Add a new handler to the list of handlers. + + Args: + - handler: A MetricsHandler object to add to the list of subscribed handlers. + """ + self.handlers.append(handler) + + def create_operation( + self, op_type: OperationType, **kwargs + ) -> ActiveOperationMetric: + """ + Creates a new operation and registers it with the subscribed handlers. + """ + return ActiveOperationMetric(op_type, **kwargs, handlers=self.handlers) + + def close(self): + """ + Close all handlers. + """ + for handler in self.handlers: + handler.close() diff --git a/google/cloud/bigtable/data/_metrics/tracked_retry.py b/google/cloud/bigtable/data/_metrics/tracked_retry.py new file mode 100644 index 000000000..94d2e5dcb --- /dev/null +++ b/google/cloud/bigtable/data/_metrics/tracked_retry.py @@ -0,0 +1,133 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Methods for instrumenting a google.api_core.retry.retry_target or +google.api_core.retry.retry_target_stream method + +`tracked_retry` will intercept the `on_error` and `exception_factory` +callbacks to update the associated ActiveOperationMetric when exceptions +are encountered through the retryable rpc. +""" +from __future__ import annotations + +from typing import Callable, List, Optional, Tuple, TypeVar + +from grpc import StatusCode +from google.api_core.exceptions import GoogleAPICallError +from google.api_core.retry import RetryFailureReason +from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.cloud.bigtable.data._metrics import ActiveOperationMetric +from google.cloud.bigtable.data._metrics import OperationState + + +T = TypeVar("T") + + +ExceptionFactoryType = Callable[ + [List[Exception], RetryFailureReason, Optional[float]], + Tuple[Exception, Optional[Exception]], +] + + +def _track_retryable_error( + operation: ActiveOperationMetric, +) -> Callable[[Exception], None]: + """ + Used as input to api_core.Retry classes, to track when retryable errors are encountered + + Should be passed as on_error callback + """ + + def wrapper(exc: Exception) -> None: + try: + # record metadata from failed rpc + if isinstance(exc, GoogleAPICallError) and exc.errors: + rpc_error = exc.errors[-1] + metadata = list(rpc_error.trailing_metadata()) + list( + rpc_error.initial_metadata() + ) + operation.add_response_metadata({k: v for k, v in metadata}) + except Exception: + # ignore errors in metadata collection + pass + if isinstance(exc, _MutateRowsIncomplete): + # _MutateRowsIncomplete represents a successful rpc with some failed mutations + # mark the attempt as successful + operation.end_attempt_with_status(StatusCode.OK) + else: + operation.end_attempt_with_status(exc) + + return wrapper + + +def _track_terminal_error( + operation: ActiveOperationMetric, exception_factory: ExceptionFactoryType +) -> ExceptionFactoryType: + """ + Used as input to api_core.Retry classes, to track when terminal errors are encountered + + Should be used as a wrapper over an exception_factory callback + """ + + def wrapper( + exc_list: List[Exception], + reason: RetryFailureReason, + timeout_val: float | None, + ) -> tuple[Exception, Exception | None]: + source_exc, cause_exc = exception_factory(exc_list, reason, timeout_val) + try: + # record metadata from failed rpc + if isinstance(source_exc, GoogleAPICallError) and source_exc.errors: + rpc_error = source_exc.errors[-1] + metadata = list(rpc_error.trailing_metadata()) + list( + rpc_error.initial_metadata() + ) + operation.add_response_metadata({k: v for k, v in metadata}) + except Exception: + # ignore errors in metadata collection + pass + if ( + reason == RetryFailureReason.TIMEOUT + and operation.state == OperationState.ACTIVE_ATTEMPT + and exc_list + ): + # record ending attempt for timeout failures + attempt_exc = exc_list[-1] + _track_retryable_error(operation)(attempt_exc) + operation.end_with_status(source_exc) + return source_exc, cause_exc + + return wrapper + + +def tracked_retry( + *, + retry_fn: Callable[..., T], + operation: ActiveOperationMetric, + **kwargs, +) -> T: + """ + Wrapper for retry_target or retry_target_stream, which injects methods to + track the lifecycle of the retry using the provided ActiveOperationMetric + """ + in_exception_factory = kwargs.pop("exception_factory", _retry_exception_factory) + kwargs.pop("on_error", None) +
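# any caller-supplied on_error and sleep_generator are intentionally discarded here; + # the metric-tracking callbacks and the operation's own backoff_generator are injected below +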
kwargs.pop("sleep_generator", None) + return retry_fn( + sleep_generator=operation.backoff_generator, + on_error=_track_retryable_error(operation), + exception_factory=_track_terminal_error(operation, in_exception_factory), + **kwargs, + ) diff --git a/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py b/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py new file mode 100644 index 000000000..3bf7b562f --- /dev/null +++ b/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py @@ -0,0 +1,184 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file is automatically generated by CrossSync. Do not edit manually. + +from __future__ import annotations +from typing import Sequence, TYPE_CHECKING +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +import google.cloud.bigtable_v2.types.bigtable as types_pb +import google.cloud.bigtable.data.exceptions as bt_exceptions +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT +from google.cloud.bigtable.data.mutations import _EntryWithProto +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable_v2.services.bigtable.client import ( + BigtableClient as GapicClientType, + ) + from google.cloud.bigtable.data._sync_autogen.client import ( + _DataApiTarget as TargetType, + ) + + +class _MutateRowsOperation: + """ + MutateRowsOperation manages the logic of sending a set of row mutations, + and retrying on failed entries. It manages this using the _run_attempt + function, which attempts to mutate all outstanding entries, and raises + _MutateRowsIncomplete if any retryable errors are encountered. + + Errors are exposed as a MutationsExceptionGroup, which contains a list of + exceptions organized by the related failed mutation entries. + + Args: + gapic_client: the client to use for the mutate_rows call + target: the table or view associated with the request + mutation_entries: a list of RowMutationEntry objects to send to the server + operation_timeout: the timeout to use for the entire operation, in seconds. + attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds. + If not specified, the request will run until operation_timeout is reached. 
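+ + Example (an illustrative sketch; assumes an initialized gapic client, target, and mutation entries): + + op = _MutateRowsOperation(gapic_client, target, entries, operation_timeout=60, attempt_timeout=20) + op.start() # raises MutationsExceptionGroup if any entry failed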
+ """ + + def __init__( + self, + gapic_client: GapicClientType, + target: TargetType, + mutation_entries: list["RowMutationEntry"], + operation_timeout: float, + attempt_timeout: float | None, + retryable_exceptions: Sequence[type[Exception]] = (), + ): + total_mutations = sum((len(entry.mutations) for entry in mutation_entries)) + if total_mutations > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT: + raise ValueError( + f"mutate_rows requests can contain at most {_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations across all entries. Found {total_mutations}." + ) + self._target = target + self._gapic_fn = gapic_client.mutate_rows + self.is_retryable = retries.if_exception_type( + *retryable_exceptions, bt_exceptions._MutateRowsIncomplete + ) + sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + self._operation = lambda: CrossSync._Sync_Impl.retry_target( + self._run_attempt, + self.is_retryable, + sleep_generator, + operation_timeout, + exception_factory=_retry_exception_factory, + ) + self.timeout_generator = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self.mutations = [_EntryWithProto(m, m._to_pb()) for m in mutation_entries] + self.remaining_indices = list(range(len(self.mutations))) + self.errors: dict[int, list[Exception]] = {} + + def start(self): + """Start the operation, and run until completion + + Raises: + MutationsExceptionGroup: if any mutations failed""" + try: + self._operation() + except Exception as exc: + incomplete_indices = self.remaining_indices.copy() + for idx in incomplete_indices: + self._handle_entry_error(idx, exc) + finally: + all_errors: list[Exception] = [] + for idx, exc_list in self.errors.items(): + if len(exc_list) == 0: + raise core_exceptions.ClientError( + f"Mutation {idx} failed with no associated errors" + ) + elif len(exc_list) == 1: + cause_exc = exc_list[0] + else: + cause_exc = bt_exceptions.RetryExceptionGroup(exc_list) + entry = self.mutations[idx].entry + all_errors.append( + bt_exceptions.FailedMutationEntryError(idx, entry, cause_exc) + ) + if all_errors: + raise bt_exceptions.MutationsExceptionGroup( + all_errors, len(self.mutations) + ) + + def _run_attempt(self): + """Run a single attempt of the mutate_rows rpc. 
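+ + Entries that fail with a retryable error are added back to self.remaining_indices, + to be retried on the next attempt.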
+ + Raises: + _MutateRowsIncomplete: if there are failed mutations eligible for + retry after the attempt is complete + GoogleAPICallError: if the gapic rpc fails""" + request_entries = [self.mutations[idx].proto for idx in self.remaining_indices] + active_request_indices = { + req_idx: orig_idx + for (req_idx, orig_idx) in enumerate(self.remaining_indices) + } + self.remaining_indices = [] + if not request_entries: + return + try: + result_generator = self._gapic_fn( + request=types_pb.MutateRowsRequest( + entries=request_entries, + app_profile_id=self._target.app_profile_id, + **self._target._request_path, + ), + timeout=next(self.timeout_generator), + retry=None, + ) + for result_list in result_generator: + for result in result_list.entries: + orig_idx = active_request_indices[result.index] + entry_error = core_exceptions.from_grpc_status( + result.status.code, + result.status.message, + details=result.status.details, + ) + if result.status.code != 0: + self._handle_entry_error(orig_idx, entry_error) + elif orig_idx in self.errors: + del self.errors[orig_idx] + del active_request_indices[result.index] + except Exception as exc: + for idx in active_request_indices.values(): + self._handle_entry_error(idx, exc) + raise + if self.remaining_indices: + raise bt_exceptions._MutateRowsIncomplete + + def _handle_entry_error(self, idx: int, exc: Exception): + """Add an exception to the list of exceptions for a given mutation index, + and add the index to the list of remaining indices if the exception is + retryable. + + Args: + idx: the index of the mutation that failed + exc: the exception to add to the list""" + entry = self.mutations[idx].entry + self.errors.setdefault(idx, []).append(exc) + if ( + entry.is_idempotent() + and self.is_retryable(exc) + and (idx not in self.remaining_indices) + ): + self.remaining_indices.append(idx) diff --git a/google/cloud/bigtable/data/_sync_autogen/_read_rows.py b/google/cloud/bigtable/data/_sync_autogen/_read_rows.py new file mode 100644 index 000000000..3593475a9 --- /dev/null +++ b/google/cloud/bigtable/data/_sync_autogen/_read_rows.py @@ -0,0 +1,304 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +from __future__ import annotations +from typing import Sequence, TYPE_CHECKING +from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB +from google.cloud.bigtable_v2.types import ReadRowsResponse as ReadRowsResponsePB +from google.cloud.bigtable_v2.types import RowSet as RowSetPB +from google.cloud.bigtable_v2.types import RowRange as RowRangePB +from google.cloud.bigtable.data.row import Row, Cell +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _RowSetComplete +from google.cloud.bigtable.data.exceptions import _ResetRow +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.api_core import retry as retries +from google.api_core.retry import exponential_sleep_generator +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + from google.cloud.bigtable.data._sync_autogen.client import ( + _DataApiTarget as TargetType, + ) + + +class _ReadRowsOperation: + """ + ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream + into a stream of Row objects. + + ReadRowsOperation.merge_row_response_stream takes in a stream of ReadRowsResponse + and turns them into a stream of Row objects using an internal + StateMachine. + + ReadRowsOperation(request, client) handles row merging logic end-to-end, including + performing retries on stream errors. + + Args: + query: The query to execute + target: The table or view to send the request to + operation_timeout: The total time to allow for the operation, in seconds + attempt_timeout: The time to allow for each individual attempt, in seconds + retryable_exceptions: A list of exceptions that should trigger a retry + """ + + __slots__ = ( + "attempt_timeout_gen", + "operation_timeout", + "request", + "target", + "_predicate", + "_last_yielded_row_key", + "_remaining_count", + ) + + def __init__( + self, + query: ReadRowsQuery, + target: TargetType, + operation_timeout: float, + attempt_timeout: float, + retryable_exceptions: Sequence[type[Exception]] = (), + ): + self.attempt_timeout_gen = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self.operation_timeout = operation_timeout + if isinstance(query, dict): + self.request = ReadRowsRequestPB( + **query, **target._request_path, app_profile_id=target.app_profile_id + ) + else: + self.request = query._to_pb(target) + self.target = target + self._predicate = retries.if_exception_type(*retryable_exceptions) + self._last_yielded_row_key: bytes | None = None + self._remaining_count: int | None = self.request.rows_limit or None + + def start_operation(self) -> CrossSync._Sync_Impl.Iterable[Row]: + """Start the read_rows operation, retrying on retryable errors. + + Yields: + Row: The next row in the stream""" + return CrossSync._Sync_Impl.retry_target_stream( + self._read_rows_attempt, + self._predicate, + exponential_sleep_generator(0.01, 60, multiplier=2), + self.operation_timeout, + exception_factory=_retry_exception_factory, + ) + + def _read_rows_attempt(self) -> CrossSync._Sync_Impl.Iterable[Row]: + """Attempt a single read_rows rpc call. + This function is intended to be wrapped by retry logic, + which will call this function until it succeeds or + a non-retryable error is raised. 
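+ + Before each retry, the request's row set is revised to exclude rows that have + already been yielded, and the row limit is reduced by the number of rows already emitted.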
+ + Yields: + Row: The next row in the stream""" + if self._last_yielded_row_key is not None: + try: + self.request.rows = self._revise_request_rowset( + row_set=self.request.rows, + last_seen_row_key=self._last_yielded_row_key, + ) + except _RowSetComplete: + return self.merge_rows(None) + if self._remaining_count is not None: + self.request.rows_limit = self._remaining_count + if self._remaining_count == 0: + return self.merge_rows(None) + gapic_stream = self.target.client._gapic_client.read_rows( + self.request, timeout=next(self.attempt_timeout_gen), retry=None + ) + chunked_stream = self.chunk_stream(gapic_stream) + return self.merge_rows(chunked_stream) + + def chunk_stream( + self, + stream: CrossSync._Sync_Impl.Awaitable[ + CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB] + ], + ) -> CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB.CellChunk]: + """process chunks out of raw read_rows stream + + Args: + stream: the raw read_rows stream from the gapic client + Yields: + ReadRowsResponsePB.CellChunk: the next chunk in the stream""" + for resp in stream: + resp = resp._pb + if resp.last_scanned_row_key: + if ( + self._last_yielded_row_key is not None + and resp.last_scanned_row_key <= self._last_yielded_row_key + ): + raise InvalidChunk("last scanned out of order") + self._last_yielded_row_key = resp.last_scanned_row_key + current_key = None + for c in resp.chunks: + if current_key is None: + current_key = c.row_key + if current_key is None: + raise InvalidChunk("first chunk is missing a row key") + elif ( + self._last_yielded_row_key + and current_key <= self._last_yielded_row_key + ): + raise InvalidChunk("row keys should be strictly increasing") + yield c + if c.reset_row: + current_key = None + elif c.commit_row: + self._last_yielded_row_key = current_key + if self._remaining_count is not None: + self._remaining_count -= 1 + if self._remaining_count < 0: + raise InvalidChunk("emit count exceeds row limit") + current_key = None + + @staticmethod + def merge_rows( + chunks: CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB.CellChunk] | None, + ) -> CrossSync._Sync_Impl.Iterable[Row]: + """Merge chunks into rows + + Args: + chunks: the chunk stream to merge + Yields: + Row: the next row in the stream""" + if chunks is None: + return + it = chunks.__iter__() + while True: + try: + c = it.__next__() + except CrossSync._Sync_Impl.StopIteration: + return + row_key = c.row_key + if not row_key: + raise InvalidChunk("first row chunk is missing key") + cells = [] + family: str | None = None + qualifier: bytes | None = None + try: + while True: + if c.reset_row: + raise _ResetRow(c) + k = c.row_key + f = c.family_name.value + q = c.qualifier.value if c.HasField("qualifier") else None + if k and k != row_key: + raise InvalidChunk("unexpected new row key") + if f: + family = f + if q is not None: + qualifier = q + else: + raise InvalidChunk("new family without qualifier") + elif family is None: + raise InvalidChunk("missing family") + elif q is not None: + if family is None: + raise InvalidChunk("new qualifier without family") + qualifier = q + elif qualifier is None: + raise InvalidChunk("missing qualifier") + ts = c.timestamp_micros + labels = c.labels if c.labels else [] + value = c.value + if c.value_size > 0: + buffer = [value] + while c.value_size > 0: + c = it.__next__() + t = c.timestamp_micros + cl = c.labels + k = c.row_key + if ( + c.HasField("family_name") + and c.family_name.value != family + ): + raise InvalidChunk("family changed mid cell") + if ( + c.HasField("qualifier") + and 
c.qualifier.value != qualifier + ): + raise InvalidChunk("qualifier changed mid cell") + if t and t != ts: + raise InvalidChunk("timestamp changed mid cell") + if cl and cl != labels: + raise InvalidChunk("labels changed mid cell") + if k and k != row_key: + raise InvalidChunk("row key changed mid cell") + if c.reset_row: + raise _ResetRow(c) + buffer.append(c.value) + value = b"".join(buffer) + cells.append( + Cell(value, row_key, family, qualifier, ts, list(labels)) + ) + if c.commit_row: + yield Row(row_key, cells) + break + c = it.__next__() + except _ResetRow as e: + c = e.chunk + if ( + c.row_key + or c.HasField("family_name") + or c.HasField("qualifier") + or c.timestamp_micros + or c.labels + or c.value + ): + raise InvalidChunk("reset row with data") + continue + except CrossSync._Sync_Impl.StopIteration: + raise InvalidChunk("premature end of stream") + + @staticmethod + def _revise_request_rowset(row_set: RowSetPB, last_seen_row_key: bytes) -> RowSetPB: + """Revise the rows in the request to avoid ones we've already processed. + + Args: + row_set: the row set from the request + last_seen_row_key: the last row key encountered + Returns: + RowSetPB: the new rowset after adjusting for the last seen key + Raises: + _RowSetComplete: if there are no rows left to process after the revision""" + if row_set is None or (not row_set.row_ranges and (not row_set.row_keys)): + last_seen = last_seen_row_key + return RowSetPB(row_ranges=[RowRangePB(start_key_open=last_seen)]) + adjusted_keys: list[bytes] = [ + k for k in row_set.row_keys if k > last_seen_row_key + ] + adjusted_ranges: list[RowRangePB] = [] + for row_range in row_set.row_ranges: + end_key = row_range.end_key_closed or row_range.end_key_open or None + if end_key is None or end_key > last_seen_row_key: + new_range = RowRangePB(row_range) + start_key = row_range.start_key_closed or row_range.start_key_open + if start_key is None or start_key <= last_seen_row_key: + new_range.start_key_open = last_seen_row_key + adjusted_ranges.append(new_range) + if len(adjusted_keys) == 0 and len(adjusted_ranges) == 0: + raise _RowSetComplete() + return RowSetPB(row_keys=adjusted_keys, row_ranges=adjusted_ranges) diff --git a/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py b/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py new file mode 100644 index 000000000..78ba129d9 --- /dev/null +++ b/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py @@ -0,0 +1,96 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file is automatically generated by CrossSync. Do not edit manually. + +from __future__ import annotations +from typing import Callable +from grpc import ChannelConnectivity +from grpc import Channel + + +class _WrappedChannel(Channel): + """ + A wrapper around a gRPC channel. All methods are passed + through to the underlying channel.
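+ + Example (an illustrative sketch; the endpoint is a placeholder): + + wrapped = _WrappedChannel(grpc.insecure_channel("localhost:8086")) + state = wrapped.get_state()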
+ """ + + def __init__(self, channel: Channel): + self._channel = channel + + def unary_unary(self, *args, **kwargs): + return self._channel.unary_unary(*args, **kwargs) + + def unary_stream(self, *args, **kwargs): + return self._channel.unary_stream(*args, **kwargs) + + def stream_unary(self, *args, **kwargs): + return self._channel.stream_unary(*args, **kwargs) + + def stream_stream(self, *args, **kwargs): + return self._channel.stream_stream(*args, **kwargs) + + def channel_ready(self): + return self._channel.channel_ready() + + def __enter__(self): + self._channel.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return self._channel.__exit__(exc_type, exc_val, exc_tb) + + def get_state(self, try_to_connect: bool = False) -> ChannelConnectivity: + return self._channel.get_state(try_to_connect=try_to_connect) + + def wait_for_state_change(self, last_observed_state): + return self._channel.wait_for_state_change(last_observed_state) + + def __getattr__(self, name): + return getattr(self._channel, name) + + def close(self, grace=None): + return self._channel.close() + + def subscribe(self, callback, try_to_connect=False): + return self._channel.subscribe(callback, try_to_connect) + + def unsubscribe(self, callback): + return self._channel.unsubscribe(callback) + + +class SwappableChannel(_WrappedChannel): + """ + Provides a grpc channel wrapper that allows the internal channel to be swapped out + + Args: + - channel_fn: a nullary function that returns a new channel instance. + It should be a partial with all channel configuration arguments built-in + """ + + def __init__(self, channel_fn: Callable[[], Channel]): + self._channel_fn = channel_fn + self._channel = channel_fn() + + def create_channel(self) -> Channel: + """Create a fresh channel using the stored `channel_fn` partial""" + new_channel = self._channel_fn() + return new_channel + + def swap_channel(self, new_channel: Channel) -> Channel: + """Replace the wrapped channel with a new instance. Typically created using `create_channel`""" + old_channel = self._channel + self._channel = new_channel + return old_channel diff --git a/google/cloud/bigtable/data/_sync_autogen/client.py b/google/cloud/bigtable/data/_sync_autogen/client.py new file mode 100644 index 000000000..622002763 --- /dev/null +++ b/google/cloud/bigtable/data/_sync_autogen/client.py @@ -0,0 +1,1589 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# This file is automatically generated by CrossSync. Do not edit manually.
+ +from __future__ import annotations +from typing import cast, Any, Callable, Optional, Set, Sequence, TYPE_CHECKING +import abc +import time +import warnings +import random +import os +import concurrent.futures +from functools import partial +from grpc import Channel +from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType +from google.cloud.bigtable.data.execute_query.metadata import ( + SqlType, + _pb_metadata_to_metadata_types, +) +from google.cloud.bigtable.data.execute_query._parameters_formatting import ( + _format_execute_query_params, + _to_param_types, +) +from google.cloud.bigtable_v2.services.bigtable.transports.base import ( + DEFAULT_CLIENT_INFO, +) +from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysRequest +from google.cloud.bigtable_v2.types.bigtable import MutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowRequest +from google.cloud.client import ClientWithProject +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.api_core import retry as retries +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import ServiceUnavailable +from google.api_core.exceptions import Aborted +from google.api_core.exceptions import Cancelled +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper +import google.auth.credentials +import google.auth._default +from google.api_core import client_options as client_options_lib +from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import FailedQueryShardError +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT, _align_timeouts +from google.cloud.bigtable.data._helpers import _WarmedInstanceKey +from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.cloud.bigtable.data._helpers import _validate_timeouts +from google.cloud.bigtable.data._helpers import _get_error_type +from google.cloud.bigtable.data._helpers import _get_retryable_errors +from google.cloud.bigtable.data._helpers import _get_timeouts +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry +from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule +from google.cloud.bigtable.data.row_filters import RowFilter +from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter +from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter +from google.cloud.bigtable.data.row_filters import RowFilterChain +from google.cloud.bigtable.data._metrics import BigtableClientSideMetricsController +from google.cloud.bigtable.data._cross_sync import CrossSync +from typing import Iterable +from grpc import insecure_channel +from grpc import intercept_channel +from google.cloud.bigtable_v2.services.bigtable.transports import ( + BigtableGrpcTransport as TransportType, +) +from google.cloud.bigtable_v2.services.bigtable import BigtableClient as 
GapicClient
+from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE
+from google.cloud.bigtable.data._sync_autogen._swappable_channel import (
+    SwappableChannel as SwappableChannelType,
+)
+from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (
+    BigtableMetricsInterceptor as MetricsInterceptorType,
+)
+
+if TYPE_CHECKING:
+    from google.cloud.bigtable.data._helpers import RowKeySamples
+    from google.cloud.bigtable.data._helpers import ShardedQuery
+    from google.cloud.bigtable.data._sync_autogen.mutations_batcher import (
+        MutationsBatcher,
+    )
+    from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import (
+        ExecuteQueryIterator,
+    )
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("DataClient")
+class BigtableDataClient(ClientWithProject):
+    def __init__(
+        self,
+        *,
+        project: str | None = None,
+        credentials: google.auth.credentials.Credentials | None = None,
+        client_options: dict[str, Any]
+        | "google.api_core.client_options.ClientOptions"
+        | None = None,
+        **kwargs,
+    ):
+        """Create a client instance for the Bigtable Data API
+
+        Args:
+            project: the project which the client acts on behalf of.
+                If not passed, falls back to the default inferred
+                from the environment.
+            credentials:
+                The OAuth2 Credentials to use for this
+                client. If not passed (and if no ``_http`` object is
+                passed), falls back to the default inferred from the
+                environment.
+            client_options:
+                Client options used to set user options
+                on the client. API Endpoint should be set through client_options.
+        """
+        if "pool_size" in kwargs:
+            warnings.warn("pool_size no longer supported")
+        self.client_info = DEFAULT_CLIENT_INFO
+        self.client_info.client_library_version = self._client_version()
+        if type(client_options) is dict:
+            client_options = client_options_lib.from_dict(client_options)
+        client_options = cast(
+            Optional[client_options_lib.ClientOptions], client_options
+        )
+        self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
+        if self._emulator_host is not None:
+            warnings.warn(
+                "Connecting to Bigtable emulator at {}".format(self._emulator_host),
+                RuntimeWarning,
+                stacklevel=2,
+            )
+            if credentials is None:
+                credentials = google.auth.credentials.AnonymousCredentials()
+            if project is None:
+                project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+        self._metrics_interceptor = MetricsInterceptorType()
+        ClientWithProject.__init__(
+            self,
+            credentials=credentials,
+            project=project,
+            client_options=client_options,
+        )
+        self._gapic_client = GapicClient(
+            credentials=credentials,
+            client_options=client_options,
+            client_info=self.client_info,
+            transport=lambda *args, **kwargs: TransportType(
+                *args, **kwargs, channel=self._build_grpc_channel
+            ),
+        )
+        if (
+            credentials
+            and credentials.universe_domain != self.universe_domain
+            and (self._emulator_host is None)
+        ):
+            raise ValueError(
+                f"The configured universe domain ({self.universe_domain}) does not match the universe domain found in the credentials ({self._credentials.universe_domain}). If you haven't configured the universe domain explicitly, `googleapis.com` is the default."
+            )
+        self._is_closed = CrossSync._Sync_Impl.Event()
+        self.transport = cast(TransportType, self._gapic_client.transport)
+        self._active_instances: Set[_WarmedInstanceKey] = set()
+        self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
+        self._channel_init_time = time.monotonic()
+        self._channel_refresh_task: CrossSync._Sync_Impl.Task[None] | None = None
+        self._executor: concurrent.futures.ThreadPoolExecutor | None = (
+            concurrent.futures.ThreadPoolExecutor()
+            if not CrossSync._Sync_Impl.is_async
+            else None
+        )
+        if self._emulator_host is None:
+            try:
+                self._start_background_channel_refresh()
+            except RuntimeError:
+                warnings.warn(
+                    f"{self.__class__.__name__} should be started in an asyncio event loop. Channel refresh will not be started",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+
+    def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType:
+        """This method is called by the gapic transport to create a grpc channel.
+
+        The init arguments passed down are captured in a partial used by SwappableChannel
+        to create new channel instances in the future, as part of the channel refresh logic
+
+        Emulators always use an insecure channel
+
+        Args:
+            - *args: positional arguments passed by the gapic layer to create a new channel with
+            - **kwargs: keyword arguments passed by the gapic layer to create a new channel with
+        Returns:
+            a custom wrapped swappable channel"""
+        create_channel_fn: Callable[[], Channel]
+        if self._emulator_host is not None:
+            create_channel_fn = partial(insecure_channel, self._emulator_host)
+        else:
+
+            def sync_create_channel_fn():
+                return intercept_channel(
+                    TransportType.create_channel(*args, **kwargs),
+                    self._metrics_interceptor,
+                )
+
+            create_channel_fn = sync_create_channel_fn
+        new_channel = SwappableChannelType(create_channel_fn)
+        return new_channel
+
+    @property
+    def universe_domain(self) -> str:
+        """Return the universe domain used by the client instance.
+
+        Returns:
+            str: The universe domain used by the client instance."""
+        return self._gapic_client.universe_domain
+
+    @property
+    def api_endpoint(self) -> str:
+        """Return the API endpoint used by the client instance.
+ + Returns: + str: The API endpoint used by the client instance.""" + return self._gapic_client.api_endpoint + + @staticmethod + def _client_version() -> str: + """Helper function to return the client version string for this client""" + version_str = f"{google.cloud.bigtable.__version__}-data" + return version_str + + def _start_background_channel_refresh(self) -> None: + """Starts a background task to ping and warm grpc channel + + Raises: + None""" + if ( + not self._channel_refresh_task + and (not self._emulator_host) + and (not self._is_closed.is_set()) + ): + CrossSync._Sync_Impl.verify_async_event_loop() + self._channel_refresh_task = CrossSync._Sync_Impl.create_task( + self._manage_channel, + sync_executor=self._executor, + task_name=f"{self.__class__.__name__} channel refresh", + ) + + def close(self, timeout: float | None = 2.0): + """Cancel all background tasks""" + self._is_closed.set() + if self._channel_refresh_task is not None: + self._channel_refresh_task.cancel() + CrossSync._Sync_Impl.wait([self._channel_refresh_task], timeout=timeout) + self.transport.close() + if self._executor: + self._executor.shutdown(wait=False) + self._channel_refresh_task = None + + def _ping_and_warm_instances( + self, + instance_key: _WarmedInstanceKey | None = None, + channel: Channel | None = None, + ) -> list[BaseException | None]: + """Prepares the backend for requests on a channel + + Pings each Bigtable instance registered in `_active_instances` on the client + + Args: + instance_key: if provided, only warm the instance associated with the key + channel: grpc channel to warm. If none, warms `self.transport.grpc_channel` + Returns: + list[BaseException | None]: sequence of results or exceptions from the ping requests + """ + channel = channel or self.transport.grpc_channel + instance_list = ( + [instance_key] if instance_key is not None else self._active_instances + ) + ping_rpc = channel.unary_unary( + "/google.bigtable.v2.Bigtable/PingAndWarm", + request_serializer=PingAndWarmRequest.serialize, + ) + partial_list = [ + partial( + ping_rpc, + request={"name": instance_name, "app_profile_id": app_profile_id}, + metadata=[ + ( + "x-goog-request-params", + f"name={instance_name}&app_profile_id={app_profile_id}", + ) + ], + wait_for_ready=True, + ) + for (instance_name, app_profile_id) in instance_list + ] + result_list = CrossSync._Sync_Impl.gather_partials( + partial_list, return_exceptions=True, sync_executor=self._executor + ) + return [r or None for r in result_list] + + def _invalidate_channel_stubs(self): + """Helper to reset the cached stubs. Needed when changing out the grpc channel""" + self.transport._stubs = {} + self.transport._prep_wrapped_messages(self.client_info) + + def _manage_channel( + self, + refresh_interval_min: float = 60 * 35, + refresh_interval_max: float = 60 * 45, + grace_period: float = 60 * 10, + ) -> None: + """Background task that periodically refreshes and warms a grpc channel + + The backend will automatically close channels after 60 minutes, so + `refresh_interval` + `grace_period` should be < 60 minutes + + Runs continuously until the client is closed + + Args: + refresh_interval_min: minimum interval before initiating refresh + process in seconds. Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + refresh_interval_max: maximum interval before initiating refresh + process in seconds. 
Actual interval will be a random value
+                between `refresh_interval_min` and `refresh_interval_max`
+            grace_period: time to allow previous channel to serve existing
+                requests before closing, in seconds"""
+        if not isinstance(self.transport.grpc_channel, SwappableChannelType):
+            warnings.warn("Channel does not support auto-refresh.")
+            return
+        super_channel: SwappableChannelType = self.transport.grpc_channel
+        first_refresh = self._channel_init_time + random.uniform(
+            refresh_interval_min, refresh_interval_max
+        )
+        next_sleep = max(first_refresh - time.monotonic(), 0)
+        if next_sleep > 0:
+            self._ping_and_warm_instances(channel=super_channel)
+        while not self._is_closed.is_set():
+            CrossSync._Sync_Impl.event_wait(
+                self._is_closed, next_sleep, async_break_early=False
+            )
+            if self._is_closed.is_set():
+                break
+            start_timestamp = time.monotonic()
+            new_channel = super_channel.create_channel()
+            self._ping_and_warm_instances(channel=new_channel)
+            old_channel = super_channel.swap_channel(new_channel)
+            self._invalidate_channel_stubs()
+            if grace_period:
+                CrossSync._Sync_Impl.event_wait(
+                    self._is_closed, grace_period, async_break_early=False
+                )
+            old_channel.close()
+            next_refresh = random.uniform(refresh_interval_min, refresh_interval_max)
+            next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0)
+
+    def _register_instance(
+        self, instance_id: str, app_profile_id: Optional[str], owner_id: int
+    ) -> None:
+        """Registers an instance with the client, and warms the channel for the instance.
+        The client will periodically refresh the grpc channel used to make
+        requests, and new channels will be warmed for each registered instance
+        Channels will not be refreshed unless at least one instance is registered
+
+        Args:
+            instance_id: id of the instance to register.
+            app_profile_id: id of the app profile calling the instance.
+            owner_id: integer id of the object owning the instance. Owners will be tracked in
+                _instance_owners, and instances will only be unregistered when all
+                owners call _remove_instance_registration. Can be obtained by calling
+                the `id` identity function, using `id(owner)`"""
+        instance_name = self._gapic_client.instance_path(self.project, instance_id)
+        instance_key = _WarmedInstanceKey(instance_name, app_profile_id)
+        self._instance_owners.setdefault(instance_key, set()).add(owner_id)
+        if instance_key not in self._active_instances:
+            self._active_instances.add(instance_key)
+            if self._channel_refresh_task:
+                self._ping_and_warm_instances(instance_key)
+            else:
+                self._start_background_channel_refresh()
+
+    def _remove_instance_registration(
+        self, instance_id: str, app_profile_id: Optional[str], owner_id: int
+    ) -> bool:
+        """Removes an instance from the client's registered instances, to prevent
+        warming new channels for the instance
+
+        If instance_id is not registered, or is still in use by other tables, returns False
+
+        Args:
+            instance_id: id of the instance to remove
+            app_profile_id: id of the app profile calling the instance.
+            owner_id: integer id of the object owning the instance. Can be
+                obtained by calling the `id` identity function, using `id(owner)`.
+ Returns: + bool: True if instance was removed, else False""" + instance_name = self._gapic_client.instance_path(self.project, instance_id) + instance_key = _WarmedInstanceKey(instance_name, app_profile_id) + owner_list = self._instance_owners.get(instance_key, set()) + try: + owner_list.remove(owner_id) + if len(owner_list) == 0: + self._active_instances.remove(instance_key) + return True + except KeyError: + return False + + def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> Table: + """Returns a table instance for making data API requests. All arguments are passed + directly to the Table constructor. + + + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Returns: + Table: a table instance for making data API requests + Raises: + None""" + return Table(self, instance_id, table_id, *args, **kwargs) + + def get_authorized_view( + self, instance_id: str, table_id: str, authorized_view_id: str, *args, **kwargs + ) -> AuthorizedView: + """Returns an authorized view instance for making data API requests. All arguments are passed + directly to the AuthorizedView constructor. + + + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + authorized_view_id: The id for the authorized view to use for requests + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. 
If not set, defaults to Table's value
+            default_read_rows_attempt_timeout: The default timeout for individual
+                read rows rpc requests, in seconds. If not set, defaults to Table's value
+            default_mutate_rows_operation_timeout: The default timeout for mutate rows
+                operations, in seconds. If not set, defaults to Table's value
+            default_mutate_rows_attempt_timeout: The default timeout for individual
+                mutate rows rpc requests, in seconds. If not set, defaults to Table's value
+            default_operation_timeout: The default timeout for all other operations, in
+                seconds. If not set, defaults to Table's value
+            default_attempt_timeout: The default timeout for all other individual rpc
+                requests, in seconds. If not set, defaults to Table's value
+            default_read_rows_retryable_errors: a list of errors that will be retried
+                if encountered during read_rows and related operations. If not set,
+                defaults to Table's value
+            default_mutate_rows_retryable_errors: a list of errors that will be retried
+                if encountered during mutate_rows and related operations. If not set,
+                defaults to Table's value
+            default_retryable_errors: a list of errors that will be retried if
+                encountered during all other operations. If not set, defaults to
+                Table's value
+        Returns:
+            AuthorizedView: an authorized view instance for making data API requests
+        Raises:
+            None"""
+        return CrossSync._Sync_Impl.AuthorizedView(
+            self, instance_id, table_id, authorized_view_id, *args, **kwargs
+        )
+
+    def execute_query(
+        self,
+        query: str,
+        instance_id: str,
+        *,
+        parameters: dict[str, ExecuteQueryValueType] | None = None,
+        parameter_types: dict[str, SqlType.Type] | None = None,
+        app_profile_id: str | None = None,
+        operation_timeout: float = 600,
+        attempt_timeout: float | None = 20,
+        retryable_errors: Sequence[type[Exception]] = (
+            DeadlineExceeded,
+            ServiceUnavailable,
+            Aborted,
+        ),
+        prepare_operation_timeout: float = 60,
+        prepare_attempt_timeout: float | None = 20,
+        prepare_retryable_errors: Sequence[type[Exception]] = (
+            DeadlineExceeded,
+            ServiceUnavailable,
+        ),
+        column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+    ) -> "ExecuteQueryIterator":
+        """Executes an SQL query on an instance.
+        Returns an iterator to stream back columns from selected rows.
+
+        Failed requests within operation_timeout will be retried based on the
+        retryable_errors list until operation_timeout is reached.
+
+        Note that this makes two requests, one to ``PrepareQuery`` and one to ``ExecuteQuery``.
+        These have separate retry configurations. ``ExecuteQuery`` is where the bulk of the
+        work happens.
+
+        Args:
+            query: Query to be run on Bigtable instance. The query can use ``@param``
+                placeholders to use parameter interpolation on the server. Values for all
+                parameters should be provided in ``parameters``. Types of parameters are
+                inferred but should be provided in ``parameter_types`` if the inference is
+                not possible (i.e. when value can be None, an empty list or an empty dict).
+            instance_id: The Bigtable instance ID to perform the query on.
+                instance_id is combined with the client's project to fully
+                specify the instance.
+            parameters: Dictionary with values for all parameters used in the ``query``.
+            parameter_types: Dictionary with types of parameters used in the ``query``.
+                Required to contain entries only for parameters whose type cannot be
+                detected automatically (i.e. the value can be None, an empty list or
+                an empty dict).
+            app_profile_id: The app profile to associate with requests.
+                https://cloud.google.com/bigtable/docs/app-profiles
+            operation_timeout: the time budget for the entire executeQuery operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to 600 seconds.
+            attempt_timeout: the time budget for an individual executeQuery network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to 20 seconds.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered during executeQuery.
+                Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted)
+            prepare_operation_timeout: the time budget for the entire prepareQuery operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to 60 seconds.
+            prepare_attempt_timeout: the time budget for an individual prepareQuery network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to 20 seconds.
+                If None, defaults to prepare_operation_timeout.
+            prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery.
+                Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable)
+            column_info: (Optional) A dictionary mapping column names to Protobuf message classes or EnumTypeWrapper objects.
+                This dictionary provides the necessary type information for deserializing PROTO and
+                ENUM column values from the query results. When an entry is provided
+                for a PROTO or ENUM column, the client library will attempt to deserialize the raw data.
+
+                - For PROTO columns: The value in the dictionary should be the
+                  Protobuf Message class (e.g., ``my_pb2.MyMessage``).
+                - For ENUM columns: The value should be the Protobuf EnumTypeWrapper
+                  object (e.g., ``my_pb2.MyEnum``).
+
+                Example::
+
+                    import my_pb2
+
+                    column_info = {
+                        "my_proto_column": my_pb2.MyMessage,
+                        "my_enum_column": my_pb2.MyEnum
+                    }
+
+                If ``column_info`` is not provided, or if a specific column name is not found
+                in the dictionary:
+
+                - PROTO columns will be returned as raw bytes.
+                - ENUM columns will be returned as integers.
+
+                Note for Nested PROTO or ENUM Fields:
+
+                To specify types for PROTO or ENUM fields within STRUCTs or MAPs, use a dot-separated
+                path from the top-level column name.
+
+                - For STRUCTs: ``struct_column_name.field_name``
+                - For MAPs: ``map_column_name.key`` or ``map_column_name.value`` to specify types
+                  for the map keys or values, respectively.
+
+                Example::
+
+                    import my_pb2
+
+                    column_info = {
+                        # Top-level column
+                        "my_proto_column": my_pb2.MyMessage,
+                        "my_enum_column": my_pb2.MyEnum,
+
+                        # Nested field in a STRUCT column named 'my_struct'
+                        "my_struct.nested_proto_field": my_pb2.OtherMessage,
+                        "my_struct.nested_enum_field": my_pb2.AnotherEnum,
+
+                        # Nested field in a MAP column named 'my_map'
+                        "my_map.key": my_pb2.MapKeyEnum,  # If map keys were enums
+                        "my_map.value": my_pb2.MapValueMessage,
+
+                        # PROTO field inside a STRUCT, where the STRUCT is the value in a MAP column
+                        "struct_map.value.nested_proto_field": my_pb2.DeeplyNestedProto,
+                        "struct_map.value.nested_enum_field": my_pb2.DeeplyNestedEnum
+                    }
+
+        Returns:
+            ExecuteQueryIterator: an iterator that yields rows returned by the query
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+            google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if
+                a parameter is passed without an explicit type, and the type cannot be inferred
+            google.protobuf.message.DecodeError: raised if the deserialization of a PROTO/ENUM value fails.
+        """
+        instance_name = self._gapic_client.instance_path(self.project, instance_id)
+        converted_param_types = _to_param_types(parameters, parameter_types)
+        prepare_request = {
+            "instance_name": instance_name,
+            "query": query,
+            "app_profile_id": app_profile_id,
+            "param_types": converted_param_types,
+            "proto_format": {},
+        }
+        prepare_predicate = retries.if_exception_type(
+            *[_get_error_type(e) for e in prepare_retryable_errors]
+        )
+        (prepare_operation_timeout, prepare_attempt_timeout) = _align_timeouts(
+            prepare_operation_timeout, prepare_attempt_timeout
+        )
+        prepare_sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+        target = partial(
+            self._gapic_client.prepare_query,
+            request=prepare_request,
+            timeout=prepare_attempt_timeout,
+            retry=None,
+        )
+        prepare_result = CrossSync._Sync_Impl.retry_target(
+            target,
+            prepare_predicate,
+            prepare_sleep_generator,
+            prepare_operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+        prepare_metadata = _pb_metadata_to_metadata_types(prepare_result.metadata)
+        retryable_excs = [_get_error_type(e) for e in retryable_errors]
+        pb_params = _format_execute_query_params(parameters, parameter_types)
+        request_body = {
+            "instance_name": instance_name,
+            "app_profile_id": app_profile_id,
+            "prepared_query": prepare_result.prepared_query,
+            "params": pb_params,
+        }
+        (operation_timeout, attempt_timeout) = _align_timeouts(
+            operation_timeout, attempt_timeout
+        )
+        return CrossSync._Sync_Impl.ExecuteQueryIterator(
+            self,
+            instance_id,
+            app_profile_id,
+            request_body,
+            prepare_metadata,
+            attempt_timeout,
+            operation_timeout,
+            retryable_excs=retryable_excs,
+            column_info=column_info,
+        )
+
+    def __enter__(self):
+        self._start_background_channel_refresh()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+        self._gapic_client.__exit__(exc_type, exc_val, exc_tb)
+
+
+class _DataApiTarget(abc.ABC):
+    """
+    Abstract class containing API surface for BigtableDataClient.
Should not be created directly + + Can be instantiated as a Table or an AuthorizedView + """ + + def __init__( + self, + client: BigtableDataClient, + instance_id: str, + table_id: str, + app_profile_id: str | None = None, + *, + default_read_rows_operation_timeout: float = 600, + default_read_rows_attempt_timeout: float | None = 20, + default_mutate_rows_operation_timeout: float = 600, + default_mutate_rows_attempt_timeout: float | None = 60, + default_operation_timeout: float = 60, + default_attempt_timeout: float | None = 20, + default_read_rows_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + Aborted, + Cancelled, + ), + default_mutate_rows_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), + default_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), + ): + """Initialize a Table instance + + + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. 
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Raises: + None""" + _validate_timeouts( + default_operation_timeout, default_attempt_timeout, allow_none=True + ) + _validate_timeouts( + default_read_rows_operation_timeout, + default_read_rows_attempt_timeout, + allow_none=True, + ) + _validate_timeouts( + default_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout, + allow_none=True, + ) + self.client = client + self.instance_id = instance_id + self.instance_name = self.client._gapic_client.instance_path( + self.client.project, instance_id + ) + self.table_id = table_id + self.table_name = self.client._gapic_client.table_path( + self.client.project, instance_id, table_id + ) + self.app_profile_id: str | None = app_profile_id + self.default_operation_timeout: float = default_operation_timeout + self.default_attempt_timeout: float | None = default_attempt_timeout + self.default_read_rows_operation_timeout: float = ( + default_read_rows_operation_timeout + ) + self.default_read_rows_attempt_timeout: float | None = ( + default_read_rows_attempt_timeout + ) + self.default_mutate_rows_operation_timeout: float = ( + default_mutate_rows_operation_timeout + ) + self.default_mutate_rows_attempt_timeout: float | None = ( + default_mutate_rows_attempt_timeout + ) + self.default_read_rows_retryable_errors: Sequence[type[Exception]] = ( + default_read_rows_retryable_errors or () + ) + self.default_mutate_rows_retryable_errors: Sequence[type[Exception]] = ( + default_mutate_rows_retryable_errors or () + ) + self.default_retryable_errors: Sequence[type[Exception]] = ( + default_retryable_errors or () + ) + self._metrics = BigtableClientSideMetricsController() + try: + self._register_instance_future = CrossSync._Sync_Impl.create_task( + self.client._register_instance, + self.instance_id, + self.app_profile_id, + id(self), + sync_executor=self.client._executor, + ) + except RuntimeError as e: + raise RuntimeError( + f"{self.__class__.__name__} must be created within an async event loop context." + ) from e + + @property + @abc.abstractmethod + def _request_path(self) -> dict[str, str]: + """Used to populate table_name or authorized_view_name for rpc requests, depending on the subclass + + Unimplemented in base class""" + raise NotImplementedError + + def __str__(self): + path_str = list(self._request_path.values())[0] if self._request_path else "" + return f"{self.__class__.__name__}<{path_str!r}>" + + def read_rows_stream( + self, + query: ReadRowsQuery, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> Iterable[Row]: + """Read a set of rows from the table, based on the specified query. + Returns an iterator to asynchronously stream back row data. + + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + query: contains details about which rows to return + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. 
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors
+        Returns:
+            Iterable[Row]: an iterator that yields rows returned by the query
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        (operation_timeout, attempt_timeout) = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+        retryable_excs = _get_retryable_errors(retryable_errors, self)
+        row_merger = CrossSync._Sync_Impl._ReadRowsOperation(
+            query,
+            self,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_exceptions=retryable_excs,
+        )
+        return row_merger.start_operation()
+
+    def read_rows(
+        self,
+        query: ReadRowsQuery,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> list[Row]:
+        """Read a set of rows from the table, based on the specified query.
+        Returns results as a list of Row objects when the request is complete.
+        For streamed results, use read_rows_stream.
+
+        Failed requests within operation_timeout will be retried based on the
+        retryable_errors list until operation_timeout is reached.
+
+        Args:
+            query: contains details about which rows to return
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_read_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors.
+        Returns:
+            list[Row]: a list of Rows returned by the query
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        row_generator = self.read_rows_stream(
+            query,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_errors=retryable_errors,
+        )
+        return [row for row in row_generator]
+
+    def read_row(
+        self,
+        row_key: str | bytes,
+        *,
+        row_filter: RowFilter | None = None,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> Row | None:
+        """Read a single row from the table, based on the specified key.
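+
+        Example (illustrative sketch; assumes an existing ``table`` instance)::
+
+            row = table.read_row(b"user#1234")
+            if row is not None:
+                print(row.row_key)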
+
+        Failed requests within operation_timeout will be retried based on the
+        retryable_errors list until operation_timeout is reached.
+
+        Args:
+            row_key: the key of the row to read
+            row_filter: a filter to apply to the contents of the row
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_read_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors.
+        Returns:
+            Row | None: a Row object if the row exists, otherwise None
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        if row_key is None:
+            raise ValueError("row_key must be string or bytes")
+        query = ReadRowsQuery(row_keys=row_key, row_filter=row_filter, limit=1)
+        results = self.read_rows(
+            query,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_errors=retryable_errors,
+        )
+        if len(results) == 0:
+            return None
+        return results[0]
+
+    def read_rows_sharded(
+        self,
+        sharded_query: ShardedQuery,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> list[Row]:
+        """Runs a sharded query in parallel, then returns the results in a single list.
+        Results will be returned in the order of the input queries.
+
+        This function is intended to be run on the results of a query.shard() call.
+        For example::
+
+            table_shard_keys = table.sample_row_keys()
+            query = ReadRowsQuery(...)
+            shard_queries = query.shard(table_shard_keys)
+            results = table.read_rows_sharded(shard_queries)
+
+        Args:
+            sharded_query: a sharded query to execute
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_read_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors.
+ Returns: + list[Row]: a list of Rows returned by the query + Raises: + ShardedReadRowsExceptionGroup: if any of the queries failed + ValueError: if the query_list is empty""" + if not sharded_query: + raise ValueError("empty sharded_query") + (operation_timeout, attempt_timeout) = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + rpc_timeout_generator = _attempt_timeout_generator( + operation_timeout, operation_timeout + ) + concurrency_sem = CrossSync._Sync_Impl.Semaphore(_CONCURRENCY_LIMIT) + + def read_rows_with_semaphore(query): + with concurrency_sem: + shard_timeout = next(rpc_timeout_generator) + if shard_timeout <= 0: + raise DeadlineExceeded( + "Operation timeout exceeded before starting query" + ) + return self.read_rows( + query, + operation_timeout=shard_timeout, + attempt_timeout=min(attempt_timeout, shard_timeout), + retryable_errors=retryable_errors, + ) + + routine_list = [ + partial(read_rows_with_semaphore, query) for query in sharded_query + ] + batch_result = CrossSync._Sync_Impl.gather_partials( + routine_list, return_exceptions=True, sync_executor=self.client._executor + ) + error_dict = {} + shard_idx = 0 + results_list = [] + for result in batch_result: + if isinstance(result, Exception): + error_dict[shard_idx] = result + elif isinstance(result, BaseException): + raise result + else: + results_list.extend(result) + shard_idx += 1 + if error_dict: + raise ShardedReadRowsExceptionGroup( + [ + FailedQueryShardError(idx, sharded_query[idx], e) + for (idx, e) in error_dict.items() + ], + results_list, + len(sharded_query), + ) + return results_list + + def row_exists( + self, + row_key: str | bytes, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> bool: + """Return a boolean indicating whether the specified row exists in the table. + uses the filters: chain(limit cells per row = 1, strip value) + + Args: + row_key: the key of the row to check + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors. 
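+
+        Example (illustrative sketch; assumes an existing ``table`` instance)::
+
+            if table.row_exists(b"user#1234"):
+                print("row found")
+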
+        Returns:
+            bool: a bool indicating whether the row exists
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        if row_key is None:
+            raise ValueError("row_key must be string or bytes")
+        strip_filter = StripValueTransformerFilter(flag=True)
+        limit_filter = CellsRowLimitFilter(1)
+        chain_filter = RowFilterChain(filters=[limit_filter, strip_filter])
+        query = ReadRowsQuery(row_keys=row_key, limit=1, row_filter=chain_filter)
+        results = self.read_rows(
+            query,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_errors=retryable_errors,
+        )
+        return len(results) > 0
+
+    def sample_row_keys(
+        self,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+    ) -> RowKeySamples:
+        """Return a set of RowKeySamples that delimit contiguous sections of the table of
+        approximately equal size
+
+        RowKeySamples output can be used with ReadRowsQuery.shard() to create a sharded query that
+        can be parallelized across multiple backend nodes. read_rows and read_rows_stream
+        requests will call sample_row_keys internally for this purpose when sharding is enabled
+
+        RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of
+        row_keys, along with offset positions in the table
+
+        Args:
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_retryable_errors.
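+
+        Example (illustrative sketch; assumes an existing ``table`` instance)::
+
+            samples = table.sample_row_keys()
+            shard_queries = ReadRowsQuery().shard(samples)
+            results = table.read_rows_sharded(shard_queries)
+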
+        Returns:
+            RowKeySamples: a set of RowKeySamples that delimit contiguous sections of the table
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        (operation_timeout, attempt_timeout) = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+        attempt_timeout_gen = _attempt_timeout_generator(
+            attempt_timeout, operation_timeout
+        )
+        retryable_excs = _get_retryable_errors(retryable_errors, self)
+        predicate = retries.if_exception_type(*retryable_excs)
+        sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+
+        def execute_rpc():
+            results = self.client._gapic_client.sample_row_keys(
+                request=SampleRowKeysRequest(
+                    app_profile_id=self.app_profile_id, **self._request_path
+                ),
+                timeout=next(attempt_timeout_gen),
+                retry=None,
+            )
+            return [(s.row_key, s.offset_bytes) for s in results]
+
+        return CrossSync._Sync_Impl.retry_target(
+            execute_rpc,
+            predicate,
+            sleep_generator,
+            operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+
+    def mutations_batcher(
+        self,
+        *,
+        flush_interval: float | None = 5,
+        flush_limit_mutation_count: int | None = 1000,
+        flush_limit_bytes: int = 20 * _MB_SIZE,
+        flow_control_max_mutation_count: int = 100000,
+        flow_control_max_bytes: int = 100 * _MB_SIZE,
+        batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        batch_retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+    ) -> "MutationsBatcher":
+        """Returns a new mutations batcher instance.
+
+        Can be used to iteratively add mutations that are flushed as a group,
+        to avoid excess network calls
+
+        Args:
+            flush_interval: Automatically flush every flush_interval seconds. If None,
+                a table default will be used
+            flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
+                mutations are added across all entries. If None, this limit is ignored.
+            flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added.
+            flow_control_max_mutation_count: Maximum number of inflight mutations.
+            flow_control_max_bytes: Maximum number of inflight bytes.
+            batch_operation_timeout: timeout for each mutate_rows operation, in seconds.
+                Defaults to the Table's default_mutate_rows_operation_timeout
+            batch_attempt_timeout: timeout for each individual request, in seconds.
+                Defaults to the Table's default_mutate_rows_attempt_timeout.
+                If None, defaults to batch_operation_timeout.
+            batch_retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_mutate_rows_retryable_errors.
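+
+        Example (illustrative sketch; assumes an existing ``table`` instance)::
+
+            from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell
+
+            with table.mutations_batcher() as batcher:
+                batcher.append(
+                    RowMutationEntry(b"row-key", [SetCell("family", "qualifier", b"value")])
+                )
+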
+        Returns:
+            MutationsBatcher: a MutationsBatcher context manager that can batch requests
+        """
+        return CrossSync._Sync_Impl.MutationsBatcher(
+            self,
+            flush_interval=flush_interval,
+            flush_limit_mutation_count=flush_limit_mutation_count,
+            flush_limit_bytes=flush_limit_bytes,
+            flow_control_max_mutation_count=flow_control_max_mutation_count,
+            flow_control_max_bytes=flow_control_max_bytes,
+            batch_operation_timeout=batch_operation_timeout,
+            batch_attempt_timeout=batch_attempt_timeout,
+            batch_retryable_errors=batch_retryable_errors,
+        )
+
+    def mutate_row(
+        self,
+        row_key: str | bytes,
+        mutations: list[Mutation] | Mutation,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT,
+    ):
+        """Mutates a row atomically.
+
+        Cells already present in the row are left unchanged unless explicitly changed
+        by ``mutation``.
+
+        Idempotent operations (i.e., all mutations have an explicit timestamp) will be
+        retried on server failure. Non-idempotent operations will not.
+
+        Args:
+            row_key: the row to apply mutations to
+            mutations: the set of mutations to apply to the row
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+                Only idempotent mutations will be retried. Defaults to the Table's
+                default_retryable_errors.
+        Raises:
+            google.api_core.exceptions.DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing all
+                GoogleAPIError exceptions from any retries that failed
+            google.api_core.exceptions.GoogleAPIError: raised on non-idempotent operations that cannot be
+                safely retried.
+            ValueError: if invalid arguments are provided"""
+        (operation_timeout, attempt_timeout) = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+        if not mutations:
+            raise ValueError("No mutations provided")
+        mutations_list = mutations if isinstance(mutations, list) else [mutations]
+        if all((mutation.is_idempotent() for mutation in mutations_list)):
+            predicate = retries.if_exception_type(
+                *_get_retryable_errors(retryable_errors, self)
+            )
+        else:
+            predicate = retries.if_exception_type()
+        sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+        target = partial(
+            self.client._gapic_client.mutate_row,
+            request=MutateRowRequest(
+                row_key=row_key.encode("utf-8")
+                if isinstance(row_key, str)
+                else row_key,
+                mutations=[mutation._to_pb() for mutation in mutations_list],
+                app_profile_id=self.app_profile_id,
+                **self._request_path,
+            ),
+            timeout=attempt_timeout,
+            retry=None,
+        )
+        return CrossSync._Sync_Impl.retry_target(
+            target,
+            predicate,
+            sleep_generator,
+            operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+
+    def bulk_mutate_rows(
+        self,
+        mutation_entries: list[RowMutationEntry],
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+    ):
+        """Applies mutations for multiple rows in a single batched request.
+
+        Each individual RowMutationEntry is applied atomically, but separate entries
+        may be applied in arbitrary order (even for entries targeting the same row)
+        In total, the row_mutations can contain at most 100000 individual mutations
+        across all entries
+
+        Idempotent entries (i.e., entries with mutations with explicit timestamps)
+        will be retried on failure. Non-idempotent entries will not, and will be
+        reported in a raised exception group
+
+        Args:
+            mutation_entries: the batches of mutations to apply
+                Each entry will be applied atomically, but entries will be applied
+                in arbitrary order
+            operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_mutate_rows_operation_timeout
+            attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_mutate_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors + Raises: + MutationsExceptionGroup: if one or more mutations fails + Contains details about any failed entries in .exceptions + ValueError: if invalid arguments are provided""" + (operation_timeout, attempt_timeout) = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + retryable_excs = _get_retryable_errors(retryable_errors, self) + operation = CrossSync._Sync_Impl._MutateRowsOperation( + self.client._gapic_client, + self, + mutation_entries, + operation_timeout, + attempt_timeout, + retryable_exceptions=retryable_excs, + ) + operation.start() + + def check_and_mutate_row( + self, + row_key: str | bytes, + predicate: RowFilter | None, + *, + true_case_mutations: Mutation | list[Mutation] | None = None, + false_case_mutations: Mutation | list[Mutation] | None = None, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> bool: + """Mutates a row atomically based on the output of a predicate filter + + Non-idempotent operation: will not be retried + + Args: + row_key: the key of the row to mutate + predicate: the filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, + either true_case_mutations or false_case_mutations will be executed. + If None, checks that the row contains any values at all. + true_case_mutations: + Changes to be atomically applied to the specified row if + predicate yields at least one cell when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + false_case_mutations is empty, and at most 100000. + false_case_mutations: + Changes to be atomically applied to the specified row if + predicate_filter does not yield any cells when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + `true_case_mutations` is empty, and at most 100000. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. 
Defaults to the Table's default_operation_timeout + Returns: + bool indicating whether the predicate was true or false + Raises: + google.api_core.exceptions.GoogleAPIError: exceptions from grpc call""" + (operation_timeout, _) = _get_timeouts(operation_timeout, None, self) + if true_case_mutations is not None and ( + not isinstance(true_case_mutations, list) + ): + true_case_mutations = [true_case_mutations] + true_case_list = [m._to_pb() for m in true_case_mutations or []] + if false_case_mutations is not None and ( + not isinstance(false_case_mutations, list) + ): + false_case_mutations = [false_case_mutations] + false_case_list = [m._to_pb() for m in false_case_mutations or []] + result = self.client._gapic_client.check_and_mutate_row( + request=CheckAndMutateRowRequest( + true_mutations=true_case_list, + false_mutations=false_case_list, + predicate_filter=predicate._to_pb() if predicate is not None else None, + row_key=row_key.encode("utf-8") + if isinstance(row_key, str) + else row_key, + app_profile_id=self.app_profile_id, + **self._request_path, + ), + timeout=operation_timeout, + retry=None, + ) + return result.predicate_matched + + def read_modify_write_row( + self, + row_key: str | bytes, + rules: ReadModifyWriteRule | list[ReadModifyWriteRule], + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> Row: + """Reads and modifies a row atomically according to input ReadModifyWriteRules, + and returns the contents of all modified cells + + The new value for the timestamp is the greater of the existing timestamp or + the current server time. + + Non-idempotent operation: will not be retried + + Args: + row_key: the key of the row to apply read/modify/write rules to + rules: A rule or set of rules to apply to the row. + Rules are applied in order, meaning that earlier rules will affect the + results of later ones. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. + Defaults to the Table's default_operation_timeout. 
+
+        Returns:
+            Row: a Row containing cell data that was modified as part of the operation
+        Raises:
+            google.api_core.exceptions.GoogleAPIError: exceptions from grpc call
+            ValueError: if invalid arguments are provided"""
+        (operation_timeout, _) = _get_timeouts(operation_timeout, None, self)
+        if operation_timeout <= 0:
+            raise ValueError("operation_timeout must be greater than 0")
+        if rules is not None and (not isinstance(rules, list)):
+            rules = [rules]
+        if not rules:
+            raise ValueError("rules must contain at least one item")
+        result = self.client._gapic_client.read_modify_write_row(
+            request=ReadModifyWriteRowRequest(
+                rules=[rule._to_pb() for rule in rules],
+                row_key=row_key.encode("utf-8")
+                if isinstance(row_key, str)
+                else row_key,
+                app_profile_id=self.app_profile_id,
+                **self._request_path,
+            ),
+            timeout=operation_timeout,
+            retry=None,
+        )
+        return Row._from_pb(result.row)
+
+    def close(self):
+        """Called to close the Table instance and release any resources held by it."""
+        self._metrics.close()
+        if self._register_instance_future:
+            self._register_instance_future.cancel()
+        self.client._remove_instance_registration(
+            self.instance_id, self.app_profile_id, id(self)
+        )
+
+    def __enter__(self):
+        """Implement context manager protocol
+
+        Ensure registration task has time to run, so that
+        grpc channels will be warmed for the specified instance"""
+        if self._register_instance_future:
+            # block until the registration task has completed
+            self._register_instance_future.result()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Implement context manager protocol
+
+        Unregister this instance with the client, so that
+        grpc channels will no longer be warmed"""
+        self.close()
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("Table")
+class Table(_DataApiTarget):
+    """
+    Main Data API surface for interacting with a Bigtable table.
+
+    Table object maintains table_id, and app_profile_id context, and passes them with
+    each call
+    """
+
+    @property
+    def _request_path(self) -> dict[str, str]:
+        return {"table_name": self.table_name}
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("AuthorizedView")
+class AuthorizedView(_DataApiTarget):
+    """
+    Provides access to an authorized view of a table.
+
+    An authorized view is a subset of a table that you configure to include specific table data.
+    Then you grant access to the authorized view separately from access to the table.
+
+    AuthorizedView object maintains table_id, app_profile_id, and authorized_view_id context,
+    and passes them with each call
+    """
+
+    def __init__(
+        self,
+        client,
+        instance_id,
+        table_id,
+        authorized_view_id,
+        app_profile_id: str | None = None,
+        **kwargs,
+    ):
+        """Initialize an AuthorizedView instance
+
+        Args:
+            instance_id: The Bigtable instance ID to associate with this client.
+                instance_id is combined with the client's project to fully
+                specify the instance
+            table_id: The ID of the table. table_id is combined with the
+                instance_id and the client's project to fully specify the table
+            authorized_view_id: The id for the authorized view to use for requests
+            app_profile_id: The app profile to associate with requests.
+                https://cloud.google.com/bigtable/docs/app-profiles
+            default_read_rows_operation_timeout: The default timeout for read rows
+                operations, in seconds. If not set, defaults to 600 seconds (10 minutes)
+            default_read_rows_attempt_timeout: The default timeout for individual
+                read rows rpc requests, in seconds.
If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Raises: + None""" + super().__init__(client, instance_id, table_id, app_profile_id, **kwargs) + self.authorized_view_id = authorized_view_id + self.authorized_view_name: str = self.client._gapic_client.authorized_view_path( + self.client.project, instance_id, table_id, authorized_view_id + ) + + @property + def _request_path(self) -> dict[str, str]: + return {"authorized_view_name": self.authorized_view_name} diff --git a/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py b/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py new file mode 100644 index 000000000..c5a59787c --- /dev/null +++ b/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py @@ -0,0 +1,126 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is automatically generated by CrossSync. Do not edit manually. 
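+
+# Editor's note, an illustrative sketch (not part of the generated module):
+# interceptors like the one defined below are typically attached to an existing
+# grpc.Channel, e.g.:
+#
+#     import grpc
+#
+#     channel = grpc.intercept_channel(channel, BigtableMetricsInterceptor())
+#
+# Here `channel` is assumed to be a channel already created by the client.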
+
+from __future__ import annotations
+from typing import Sequence
+import time
+from functools import wraps
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import OperationState
+from google.cloud.bigtable.data._metrics.data_model import OperationType
+from grpc import UnaryUnaryClientInterceptor
+from grpc import UnaryStreamClientInterceptor
+
+
+def _with_active_operation(func):
+    """Decorator for interceptor methods to extract the active operation associated with the
+    in-scope contextvars, and pass it to the decorated function."""
+
+    @wraps(func)
+    def wrapper(self, continuation, client_call_details, request):
+        operation: ActiveOperationMetric | None = ActiveOperationMetric.from_context()
+        if operation:
+            if (
+                operation.state == OperationState.CREATED
+                or operation.state == OperationState.BETWEEN_ATTEMPTS
+            ):
+                operation.start_attempt()
+            return func(self, operation, continuation, client_call_details, request)
+        else:
+            return continuation(client_call_details, request)
+
+    return wrapper
+
+
+def _get_metadata(source) -> dict[str, str | bytes] | None:
+    """Helper to extract metadata from a call or RpcError"""
+    try:
+        metadata: Sequence[tuple[str, str | bytes]]
+        metadata = source.trailing_metadata() + source.initial_metadata()
+        return {k: v for (k, v) in metadata}
+    except Exception:
+        return None
+
+
+class BigtableMetricsInterceptor(
+    UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor
+):
+    """
+    A gRPC interceptor that captures response metadata and attaches it to the
+    active operation metric.
+    """
+
+    @_with_active_operation
+    def intercept_unary_unary(
+        self, operation, continuation, client_call_details, request
+    ):
+        """Interceptor for unary rpcs:
+        - MutateRow
+        - CheckAndMutateRow
+        - ReadModifyWriteRow"""
+        metadata = None
+        try:
+            call = continuation(client_call_details, request)
+            metadata = _get_metadata(call)
+            return call
+        except Exception as rpc_error:
+            metadata = _get_metadata(rpc_error)
+            raise rpc_error
+        finally:
+            if metadata is not None:
+                operation.add_response_metadata(metadata)
+
+    @_with_active_operation
+    def intercept_unary_stream(
+        self, operation, continuation, client_call_details, request
+    ):
+        """Interceptor for streaming rpcs:
+        - ReadRows
+        - MutateRows
+        - SampleRowKeys"""
+        try:
+            return self._streaming_generator_wrapper(
+                operation, continuation(client_call_details, request)
+            )
+        except Exception as rpc_error:
+            metadata = _get_metadata(rpc_error)
+            if metadata is not None:
+                operation.add_response_metadata(metadata)
+            raise rpc_error
+
+    @staticmethod
+    def _streaming_generator_wrapper(operation, call):
+        """Wrapped generator to be returned by intercept_unary_stream."""
+        has_first_response = (
+            operation.first_response_latency_ns is not None
+            or operation.op_type != OperationType.READ_ROWS
+        )
+        encountered_exc = None
+        try:
+            for response in call:
+                if not has_first_response:
+                    operation.first_response_latency_ns = (
+                        time.monotonic_ns() - operation.start_time_ns
+                    )
+                    has_first_response = True
+                yield response
+        except Exception as e:
+            encountered_exc = e
+            raise
+        finally:
+            if call is not None:
+                metadata = _get_metadata(encountered_exc or call)
+                if metadata is not None:
+                    operation.add_response_metadata(metadata)
diff --git a/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py b/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py
new file mode 100644
index 000000000..84f0ba8c0
--- /dev/null
+++ b/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py
@@ -0,0 +1,451 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+from __future__ import annotations
+from typing import Sequence, TYPE_CHECKING, cast
+import atexit
+import warnings
+from collections import deque
+import concurrent.futures
+from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+from google.cloud.bigtable.data.exceptions import FailedMutationEntryError
+from google.cloud.bigtable.data._helpers import _get_retryable_errors
+from google.cloud.bigtable.data._helpers import _get_timeouts
+from google.cloud.bigtable.data._helpers import TABLE_DEFAULT
+from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT
+from google.cloud.bigtable.data.mutations import Mutation
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+if TYPE_CHECKING:
+    from google.cloud.bigtable.data.mutations import RowMutationEntry
+    from google.cloud.bigtable.data._sync_autogen.client import (
+        _DataApiTarget as TargetType,
+    )
+_MB_SIZE = 1024 * 1024
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("_FlowControl")
+class _FlowControl:
+    """
+    Manages flow control for batched mutations. Mutations are registered against
+    the FlowControl object before being sent, which will block if size or count
+    limits have reached capacity. As mutations complete, they are removed from
+    the FlowControl object, which will notify any blocked requests that there
+    is additional capacity.
+
+    Flow limits are not hard limits. If a single mutation exceeds the configured
+    limits, it will be allowed as a single batch when the capacity is available.
+
+    Args:
+        max_mutation_count: maximum number of mutations to send in a single rpc.
+            This corresponds to individual mutations in a single RowMutationEntry.
+        max_mutation_bytes: maximum number of bytes to send in a single rpc.
+    Raises:
+        ValueError: if max_mutation_count or max_mutation_bytes is less than 1
+    """
+
+    def __init__(self, max_mutation_count: int, max_mutation_bytes: int):
+        self._max_mutation_count = max_mutation_count
+        self._max_mutation_bytes = max_mutation_bytes
+        if self._max_mutation_count < 1:
+            raise ValueError("max_mutation_count must be greater than 0")
+        if self._max_mutation_bytes < 1:
+            raise ValueError("max_mutation_bytes must be greater than 0")
+        self._capacity_condition = CrossSync._Sync_Impl.Condition()
+        self._in_flight_mutation_count = 0
+        self._in_flight_mutation_bytes = 0
+
+    def _has_capacity(self, additional_count: int, additional_size: int) -> bool:
+        """Checks if there is capacity to send a new entry with the given size and count
+
+        FlowControl limits are not hard limits. If a single mutation exceeds
+        the configured flow limits, it will be sent in a single batch when
+        previous batches have completed.
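+
+        For example (editor's illustrative note): with max_mutation_bytes=100, a
+        single 150-byte entry still gains capacity once in-flight bytes drop to 0,
+        because acceptable_size becomes max(100, 150) = 150.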
+
+        Args:
+            additional_count: number of mutations in the pending entry
+            additional_size: size of the pending entry
+        Returns:
+            bool: True if there is capacity to send the pending entry, False otherwise
+        """
+        acceptable_size = max(self._max_mutation_bytes, additional_size)
+        acceptable_count = max(self._max_mutation_count, additional_count)
+        new_size = self._in_flight_mutation_bytes + additional_size
+        new_count = self._in_flight_mutation_count + additional_count
+        return new_size <= acceptable_size and new_count <= acceptable_count
+
+    def remove_from_flow(
+        self, mutations: RowMutationEntry | list[RowMutationEntry]
+    ) -> None:
+        """Removes mutations from flow control. This method should be called once
+        for each mutation that was sent to add_to_flow, after the corresponding
+        operation is complete.
+
+        Args:
+            mutations: mutation or list of mutations to remove from flow control"""
+        if not isinstance(mutations, list):
+            mutations = [mutations]
+        total_count = sum((len(entry.mutations) for entry in mutations))
+        total_size = sum((entry.size() for entry in mutations))
+        self._in_flight_mutation_count -= total_count
+        self._in_flight_mutation_bytes -= total_size
+        with self._capacity_condition:
+            self._capacity_condition.notify_all()
+
+    def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]):
+        """Generator function that registers mutations with flow control. As mutations
+        are accepted into the flow control, they are yielded back to the caller,
+        to be sent in a batch. If the flow control is at capacity, the generator
+        will block until there is capacity available.
+
+        Args:
+            mutations: list of mutations to break up into batches
+        Yields:
+            list[RowMutationEntry]:
+                list of mutations that have reserved space in the flow control.
+                Each batch contains at least one mutation."""
+        if not isinstance(mutations, list):
+            mutations = [mutations]
+        start_idx = 0
+        end_idx = 0
+        while end_idx < len(mutations):
+            start_idx = end_idx
+            batch_mutation_count = 0
+            with self._capacity_condition:
+                while end_idx < len(mutations):
+                    next_entry = mutations[end_idx]
+                    next_size = next_entry.size()
+                    next_count = len(next_entry.mutations)
+                    if (
+                        self._has_capacity(next_count, next_size)
+                        and batch_mutation_count + next_count
+                        <= _MUTATE_ROWS_REQUEST_MUTATION_LIMIT
+                    ):
+                        end_idx += 1
+                        batch_mutation_count += next_count
+                        self._in_flight_mutation_bytes += next_size
+                        self._in_flight_mutation_count += next_count
+                    elif start_idx != end_idx:
+                        break
+                    else:
+                        self._capacity_condition.wait_for(
+                            lambda: self._has_capacity(next_count, next_size)
+                        )
+            yield mutations[start_idx:end_idx]
+
+
+class MutationsBatcher:
+    """
+    Allows users to send batches using context manager API.
+
+    Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining
+    to use as few network requests as required
+
+    Will automatically flush the batcher:
+    - every flush_interval seconds
+    - after queue size reaches flush_limit_mutation_count
+    - after queue reaches flush_limit_bytes
+    - when batcher is closed or destroyed
+
+    Args:
+        table: table or authorized_view used to perform rpc calls
+        flush_interval: Automatically flush every flush_interval seconds.
+            If None, no time-based flushing is performed.
+        flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count
+            mutations are added across all entries. If None, this limit is ignored.
+        flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added.
+        flow_control_max_mutation_count: Maximum number of inflight mutations.
+        flow_control_max_bytes: Maximum number of inflight bytes.
+        batch_operation_timeout: timeout for each mutate_rows operation, in seconds.
+            If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_operation_timeout.
+        batch_attempt_timeout: timeout for each individual request, in seconds.
+            If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_attempt_timeout.
+            If None, defaults to batch_operation_timeout.
+        batch_retryable_errors: a list of errors that will be retried if encountered.
+            Defaults to the Table's default_mutate_rows_retryable_errors.
+    """
+
+    def __init__(
+        self,
+        table: TargetType,
+        *,
+        flush_interval: float | None = 5,
+        flush_limit_mutation_count: int | None = 1000,
+        flush_limit_bytes: int = 20 * _MB_SIZE,
+        flow_control_max_mutation_count: int = 100000,
+        flow_control_max_bytes: int = 100 * _MB_SIZE,
+        batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        batch_retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+    ):
+        (self._operation_timeout, self._attempt_timeout) = _get_timeouts(
+            batch_operation_timeout, batch_attempt_timeout, table
+        )
+        self._retryable_errors: list[type[Exception]] = _get_retryable_errors(
+            batch_retryable_errors, table
+        )
+        self._closed = CrossSync._Sync_Impl.Event()
+        self._target = table
+        self._staged_entries: list[RowMutationEntry] = []
+        (self._staged_count, self._staged_bytes) = (0, 0)
+        self._flow_control = CrossSync._Sync_Impl._FlowControl(
+            flow_control_max_mutation_count, flow_control_max_bytes
+        )
+        self._flush_limit_bytes = flush_limit_bytes
+        self._flush_limit_count = (
+            flush_limit_mutation_count
+            if flush_limit_mutation_count is not None
+            else float("inf")
+        )
+        self._sync_rpc_executor = (
+            concurrent.futures.ThreadPoolExecutor(max_workers=8)
+            if not CrossSync._Sync_Impl.is_async
+            else None
+        )
+        self._sync_flush_executor = (
+            concurrent.futures.ThreadPoolExecutor(max_workers=4)
+            if not CrossSync._Sync_Impl.is_async
+            else None
+        )
+        self._flush_timer = CrossSync._Sync_Impl.create_task(
+            self._timer_routine, flush_interval, sync_executor=self._sync_flush_executor
+        )
+        self._flush_jobs: set[CrossSync._Sync_Impl.Future[None]] = set()
+        self._entries_processed_since_last_raise: int = 0
+        self._exceptions_since_last_raise: int = 0
+        self._exception_list_limit: int = 10
+        self._oldest_exceptions: list[Exception] = []
+        self._newest_exceptions: deque[Exception] = deque(
+            maxlen=self._exception_list_limit
+        )
+        atexit.register(self._on_exit)
+
+    def _timer_routine(self, interval: float | None) -> None:
+        """Set up a background task to flush the batcher every interval seconds
+
+        If interval is None, no time-based flushing is scheduled
+
+        Args:
+            interval: Automatically flush every interval seconds.
+                If None, no time-based flushing is performed."""
+        if not interval or interval <= 0:
+            return None
+        while not self._closed.is_set():
+            CrossSync._Sync_Impl.event_wait(
+                self._closed, timeout=interval, async_break_early=False
+            )
+            if not self._closed.is_set() and self._staged_entries:
+                self._schedule_flush()
+
+    def append(self, mutation_entry: RowMutationEntry):
+        """Add a new set of mutations to the internal queue
+
+        Args:
+            mutation_entry: new entry to add to flush queue
+        Raises:
+            RuntimeError: if batcher is closed
+            ValueError: if an invalid mutation type is added"""
+        if self._closed.is_set():
+            raise RuntimeError("Cannot append to closed MutationsBatcher")
+        if isinstance(cast(Mutation, mutation_entry), Mutation):
+            raise ValueError(
+                f"invalid mutation type: {type(mutation_entry).__name__}. Only RowMutationEntry objects are supported by batcher"
+            )
+        self._staged_entries.append(mutation_entry)
+        self._staged_count += len(mutation_entry.mutations)
+        self._staged_bytes += mutation_entry.size()
+        if (
+            self._staged_count >= self._flush_limit_count
+            or self._staged_bytes >= self._flush_limit_bytes
+        ):
+            self._schedule_flush()
+        CrossSync._Sync_Impl.yield_to_event_loop()
+
+    def _schedule_flush(self) -> CrossSync._Sync_Impl.Future[None] | None:
+        """Update the flush task to include the latest staged entries
+
+        Returns:
+            Future[None] | None:
+                future representing the background task, if started"""
+        if self._staged_entries:
+            (entries, self._staged_entries) = (self._staged_entries, [])
+            (self._staged_count, self._staged_bytes) = (0, 0)
+            new_task = CrossSync._Sync_Impl.create_task(
+                self._flush_internal, entries, sync_executor=self._sync_flush_executor
+            )
+            if not new_task.done():
+                self._flush_jobs.add(new_task)
+                new_task.add_done_callback(self._flush_jobs.remove)
+            return new_task
+        return None
+
+    def _flush_internal(self, new_entries: list[RowMutationEntry]):
+        """Flushes a set of mutations to the server, and updates internal state
+
+        Args:
+            new_entries: list of RowMutationEntry objects to flush"""
+        in_process_requests: list[
+            CrossSync._Sync_Impl.Future[list[FailedMutationEntryError]]
+        ] = []
+        for batch in self._flow_control.add_to_flow(new_entries):
+            batch_task = CrossSync._Sync_Impl.create_task(
+                self._execute_mutate_rows, batch, sync_executor=self._sync_rpc_executor
+            )
+            in_process_requests.append(batch_task)
+        found_exceptions = self._wait_for_batch_results(*in_process_requests)
+        self._entries_processed_since_last_raise += len(new_entries)
+        self._add_exceptions(found_exceptions)
+
+    def _execute_mutate_rows(
+        self, batch: list[RowMutationEntry]
+    ) -> list[FailedMutationEntryError]:
+        """Helper to execute mutation operation on a batch
+
+        Args:
+            batch: list of RowMutationEntry objects to send to server
+        Returns:
+            list[FailedMutationEntryError]:
+                list of FailedMutationEntryError objects for mutations that failed.
+ FailedMutationEntryError objects will not contain index information""" + try: + operation = CrossSync._Sync_Impl._MutateRowsOperation( + self._target.client._gapic_client, + self._target, + batch, + operation_timeout=self._operation_timeout, + attempt_timeout=self._attempt_timeout, + retryable_exceptions=self._retryable_errors, + ) + operation.start() + except MutationsExceptionGroup as e: + for subexc in e.exceptions: + subexc.index = None + return list(e.exceptions) + finally: + self._flow_control.remove_from_flow(batch) + return [] + + def _add_exceptions(self, excs: list[Exception]): + """Add new list of exceptions to internal store. To avoid unbounded memory, + the batcher will store the first and last _exception_list_limit exceptions, + and discard any in between. + + Args: + excs: list of exceptions to add to the internal store""" + self._exceptions_since_last_raise += len(excs) + if excs and len(self._oldest_exceptions) < self._exception_list_limit: + addition_count = self._exception_list_limit - len(self._oldest_exceptions) + self._oldest_exceptions.extend(excs[:addition_count]) + excs = excs[addition_count:] + if excs: + self._newest_exceptions.extend(excs[-self._exception_list_limit :]) + + def _raise_exceptions(self): + """Raise any unreported exceptions from background flush operations + + Raises: + MutationsExceptionGroup: exception group with all unreported exceptions""" + if self._oldest_exceptions or self._newest_exceptions: + (oldest, self._oldest_exceptions) = (self._oldest_exceptions, []) + newest = list(self._newest_exceptions) + self._newest_exceptions.clear() + (entry_count, self._entries_processed_since_last_raise) = ( + self._entries_processed_since_last_raise, + 0, + ) + (exc_count, self._exceptions_since_last_raise) = ( + self._exceptions_since_last_raise, + 0, + ) + raise MutationsExceptionGroup.from_truncated_lists( + first_list=oldest, + last_list=newest, + total_excs=exc_count, + entry_count=entry_count, + ) + + def __enter__(self): + """Allow use of context manager API""" + return self + + def __exit__(self, exc_type, exc, tb): + """Allow use of context manager API. + + Flushes the batcher and cleans up resources.""" + self.close() + + @property + def closed(self) -> bool: + """Returns: + - True if the batcher is closed, False otherwise""" + return self._closed.is_set() + + def close(self): + """Flush queue and clean up resources""" + self._closed.set() + self._flush_timer.cancel() + self._schedule_flush() + if self._sync_flush_executor: + with self._sync_flush_executor: + self._sync_flush_executor.shutdown(wait=True) + if self._sync_rpc_executor: + with self._sync_rpc_executor: + self._sync_rpc_executor.shutdown(wait=True) + CrossSync._Sync_Impl.wait([*self._flush_jobs, self._flush_timer]) + atexit.unregister(self._on_exit) + self._raise_exceptions() + + def _on_exit(self): + """Called when program is exited. Raises warning if unflushed mutations remain""" + if not self._closed.is_set() and self._staged_entries: + warnings.warn( + f"MutationsBatcher for target {self._target!r} was not closed. {len(self._staged_entries)} Unflushed mutations will not be sent to the server." + ) + + @staticmethod + def _wait_for_batch_results( + *tasks: CrossSync._Sync_Impl.Future[list[FailedMutationEntryError]] + | CrossSync._Sync_Impl.Future[None], + ) -> list[Exception]: + """Takes in a list of futures representing _execute_mutate_rows tasks, + waits for them to complete, and returns a list of errors encountered. 
+
+        Args:
+            *tasks: futures representing _execute_mutate_rows or _flush_internal tasks
+        Returns:
+            list[Exception]:
+                list of Exceptions encountered by any of the tasks. Errors are expected
+                to be FailedMutationEntryError, representing a failed mutation operation.
+                If a task fails with a different exception, it will be included in the
+                output list. Successful tasks will not be represented in the output list.
+        """
+        if not tasks:
+            return []
+        exceptions: list[Exception] = []
+        for task in tasks:
+            try:
+                exc_list = task.result()
+                if exc_list:
+                    for exc in exc_list:
+                        exc.index = None
+                    exceptions.extend(exc_list)
+            except Exception as e:
+                exceptions.append(e)
+        return exceptions
diff --git a/google/cloud/bigtable/data/exceptions.py b/google/cloud/bigtable/data/exceptions.py
new file mode 100644
index 000000000..b19e0e5ea
--- /dev/null
+++ b/google/cloud/bigtable/data/exceptions.py
@@ -0,0 +1,343 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+import sys
+
+from typing import Any, TYPE_CHECKING
+
+from google.api_core import exceptions as core_exceptions
+from google.cloud.bigtable.data.row import Row
+
+is_311_plus = sys.version_info >= (3, 11)
+
+if TYPE_CHECKING:
+    from google.cloud.bigtable.data.mutations import RowMutationEntry
+    from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+
+
+class InvalidChunk(core_exceptions.GoogleAPICallError):
+    """Exception raised due to invalid chunk data from the back-end."""
+
+
+class _RowSetComplete(Exception):
+    """
+    Internal exception for _ReadRowsOperation
+    Raised by revise_request_rowset when there are no rows left to process at the start of a retry attempt
+    """
+
+    pass
+
+
+class _ResetRow(Exception):  # noqa: F811
+    """
+    Internal exception for _ReadRowsOperation
+
+    Denotes that the server sent a reset_row marker, telling the client to drop
+    all previous chunks for row_key and re-read from the beginning.
+
+    Args:
+        chunk: the reset_row chunk
+    """
+
+    def __init__(self, chunk):
+        self.chunk = chunk
+
+
+class _MutateRowsIncomplete(RuntimeError):
+    """
+    Exception raised when a mutate_rows call has unfinished work.
+    """
+
+    pass
+
+
+class _BigtableExceptionGroup(ExceptionGroup if is_311_plus else Exception):  # type: ignore # noqa: F821
+    """
+    Represents one or more exceptions that occur during a bulk Bigtable operation
+
+    In Python 3.11+, this is an unmodified exception group.
+    In versions below 3.11, it is a
+    custom exception with some exception group functionality backported, but does
+    not implement the full API
+    """
+
+    def __init__(self, message, excs):
+        if is_311_plus:
+            super().__init__(message, excs)
+        else:
+            if len(excs) == 0:
+                raise ValueError("exceptions must be a non-empty sequence")
+            self.exceptions = tuple(excs)
+            # simulate an exception group in Python < 3.11 by adding exception info
+            # to the message
+            first_line = "--+---------------- 1 ----------------"
+            last_line = "+------------------------------------"
+            message_parts = [message + "\n" + first_line]
+            # print error info for each exception in the group
+            for idx, e in enumerate(excs[:15]):
+                # apply index header
+                if idx != 0:
+                    message_parts.append(
+                        f"+---------------- {str(idx + 1).rjust(2)} ----------------"
+                    )
+                cause = e.__cause__
+                # if this exception had a cause, print the cause first
+                # used to display root causes of FailedMutationEntryError and FailedQueryShardError
+                # format matches the error output of Python 3.11+
+                if cause is not None:
+                    message_parts.extend(
+                        f"| {type(cause).__name__}: {cause}".splitlines()
+                    )
+                    message_parts.append("| ")
+                    message_parts.append(
+                        "| The above exception was the direct cause of the following exception:"
+                    )
+                    message_parts.append("| ")
+                # attach error message for this sub-exception
+                # if the subexception is also a _BigtableExceptionGroup,
+                # error messages will be nested
+                message_parts.extend(f"| {type(e).__name__}: {e}".splitlines())
+            # truncate the message if there are more than 15 exceptions
+            if len(excs) > 15:
+                message_parts.append("+---------------- ... ---------------")
+                message_parts.append(f"| and {len(excs) - 15} more")
+            if last_line not in message_parts[-1]:
+                # in the case of nested _BigtableExceptionGroups, the last line
+                # does not need to be added, since one was added by the final sub-exception
+                message_parts.append(last_line)
+            super().__init__("\n ".join(message_parts))
+
+    def __new__(cls, message, excs):
+        if is_311_plus:
+            return super().__new__(cls, message, excs)
+        else:
+            return super().__new__(cls)
+
+    def __str__(self):
+        if is_311_plus:
+            # don't return built-in sub-exception message
+            return self.args[0]
+        return super().__str__()
+
+    def __repr__(self):
+        """
+        repr representation should strip out sub-exception details
+        """
+        if is_311_plus:
+            return super().__repr__()
+        message = self.args[0].split("\n")[0]
+        return f"{self.__class__.__name__}({message!r}, {self.exceptions!r})"
+
+
+class MutationsExceptionGroup(_BigtableExceptionGroup):
+    """
+    Represents one or more exceptions that occur during a bulk mutation operation
+
+    Exceptions will typically be of type FailedMutationEntryError, but other exceptions may
+    be included if they are raised during the mutation operation
+    """
+
+    @staticmethod
+    def _format_message(
+        excs: list[Exception], total_entries: int, exc_count: int | None = None
+    ) -> str:
+        """
+        Format a message for the exception group
+
+        Args:
+            excs: the exceptions in the group
+            total_entries: the total number of entries attempted, successful or not
+            exc_count: the number of exceptions associated with the request
+                if None, this will be len(excs)
+        Returns:
+            str: the formatted message
+        """
+        exc_count = exc_count if exc_count is not None else len(excs)
+        entry_str = "entry" if exc_count == 1 else "entries"
+        return f"{exc_count} failed {entry_str} from {total_entries} attempted."
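+
+    # Editor's note, an illustrative example (hypothetical exception objects):
+    #   _format_message([e1], 10)              -> "1 failed entry from 10 attempted."
+    #   _format_message([e1, e2], 10)          -> "2 failed entries from 10 attempted."
+    #   _format_message([e1], 10, exc_count=5) -> "5 failed entries from 10 attempted."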
+
+    def __init__(
+        self, excs: list[Exception], total_entries: int, message: str | None = None
+    ):
+        """
+        Args:
+            excs: the exceptions in the group
+            total_entries: the total number of entries attempted, successful or not
+            message: the message for the exception group. If None, a default message
+                will be generated
+        """
+        message = (
+            message
+            if message is not None
+            else self._format_message(excs, total_entries)
+        )
+        super().__init__(message, excs)
+        self.total_entries_attempted = total_entries
+
+    def __new__(
+        cls, excs: list[Exception], total_entries: int, message: str | None = None
+    ):
+        """
+        Args:
+            excs: the exceptions in the group
+            total_entries: the total number of entries attempted, successful or not
+            message: the message for the exception group. If None, a default message
+                will be generated
+        Returns:
+            MutationsExceptionGroup: the new instance
+        """
+        message = (
+            message if message is not None else cls._format_message(excs, total_entries)
+        )
+        instance = super().__new__(cls, message, excs)
+        instance.total_entries_attempted = total_entries
+        return instance
+
+    @classmethod
+    def from_truncated_lists(
+        cls,
+        first_list: list[Exception],
+        last_list: list[Exception],
+        total_excs: int,
+        entry_count: int,
+    ) -> MutationsExceptionGroup:
+        """
+        Create a MutationsExceptionGroup from two lists of exceptions, representing
+        a larger set that has been truncated. The MutationsExceptionGroup will
+        contain the union of the two lists as sub-exceptions, and the error message
+        describes the number of exceptions that were truncated.
+
+        Args:
+            first_list: the set of oldest exceptions to add to the ExceptionGroup
+            last_list: the set of newest exceptions to add to the ExceptionGroup
+            total_excs: the total number of exceptions associated with the request
+                Should be len(first_list) + len(last_list) + number of dropped exceptions
+                in the middle
+            entry_count: the total number of entries attempted, successful or not
+        Returns:
+            MutationsExceptionGroup: the new instance
+        """
+        first_count, last_count = len(first_list), len(last_list)
+        if first_count + last_count >= total_excs:
+            # no exceptions were dropped
+            return cls(first_list + last_list, entry_count)
+        excs = first_list + last_list
+        truncation_count = total_excs - (first_count + last_count)
+        base_message = cls._format_message(excs, entry_count, total_excs)
+        first_message = f"first {first_count}" if first_count else ""
+        last_message = f"last {last_count}" if last_count else ""
+        conjunction = " and " if first_message and last_message else ""
+        message = f"{base_message} ({first_message}{conjunction}{last_message} attached as sub-exceptions; {truncation_count} truncated)"
+        return cls(excs, entry_count, message)
+
+
+class FailedMutationEntryError(Exception):
+    """
+    Represents a single failed RowMutationEntry in a bulk_mutate_rows request.
+    A collection of FailedMutationEntryErrors will be raised in a MutationsExceptionGroup
+    """
+
+    def __init__(
+        self,
+        failed_idx: int | None,
+        failed_mutation_entry: "RowMutationEntry",
+        cause: Exception,
+    ):
+        idempotent_msg = (
+            "idempotent" if failed_mutation_entry.is_idempotent() else "non-idempotent"
+        )
+        index_msg = f" at index {failed_idx}" if failed_idx is not None else ""
+        message = f"Failed {idempotent_msg} mutation entry{index_msg}"
+        super().__init__(message)
+        self.__cause__ = cause
+        self.index = failed_idx
+        self.entry = failed_mutation_entry
+
+
+class RetryExceptionGroup(_BigtableExceptionGroup):
+    """Represents one or more exceptions that occur during a retryable operation"""
+
+    @staticmethod
+    def _format_message(excs: list[Exception]):
+        if len(excs) == 0:
+            return "No exceptions"
+        plural = "s" if len(excs) > 1 else ""
+        return f"{len(excs)} failed attempt{plural}"
+
+    def __init__(self, excs: list[Exception]):
+        super().__init__(self._format_message(excs), excs)
+
+    def __new__(cls, excs: list[Exception]):
+        return super().__new__(cls, cls._format_message(excs), excs)
+
+
+class ShardedReadRowsExceptionGroup(_BigtableExceptionGroup):
+    """
+    Represents one or more exceptions that occur during a sharded read rows operation
+    """
+
+    @staticmethod
+    def _format_message(excs: list[FailedQueryShardError], total_queries: int):
+        query_str = "query" if total_queries == 1 else "queries"
+        plural_str = "" if len(excs) == 1 else "s"
+        return f"{len(excs)} sub-exception{plural_str} (from {total_queries} {query_str} attempted)"
+
+    def __init__(
+        self,
+        excs: list[FailedQueryShardError],
+        succeeded: list[Row],
+        total_queries: int,
+    ):
+        super().__init__(self._format_message(excs, total_queries), excs)
+        self.successful_rows = succeeded
+
+    def __new__(
+        cls, excs: list[FailedQueryShardError], succeeded: list[Row], total_queries: int
+    ):
+        instance = super().__new__(cls, cls._format_message(excs, total_queries), excs)
+        instance.successful_rows = succeeded
+        return instance
+
+
+class FailedQueryShardError(Exception):
+    """
+    Represents an individual failed query in a sharded read rows operation
+    """
+
+    def __init__(
+        self,
+        failed_index: int,
+        failed_query: "ReadRowsQuery" | dict[str, Any],
+        cause: Exception,
+    ):
+        message = f"Failed query at index {failed_index}"
+        super().__init__(message)
+        self.__cause__ = cause
+        self.index = failed_index
+        self.query = failed_query
+
+
+class InvalidExecuteQueryResponse(core_exceptions.GoogleAPICallError):
+    """Exception raised due to invalid query response data from the back-end."""
+
+    # Set to internal. This is representative of an internal error.
+    code = 13
+
+
+class ParameterTypeInferenceFailed(ValueError):
+    """Exception raised when query parameter types were not provided and cannot be inferred."""
+
+
+class EarlyMetadataCallError(RuntimeError):
+    """Exception raised when metadata is requested from an ExecuteQueryIterator before the first row has been read, or the query has completed"""
diff --git a/google/cloud/bigtable/data/execute_query/__init__.py b/google/cloud/bigtable/data/execute_query/__init__.py
new file mode 100644
index 000000000..029e79b93
--- /dev/null
+++ b/google/cloud/bigtable/data/execute_query/__init__.py
@@ -0,0 +1,43 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, +) +from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import ( + ExecuteQueryIterator, +) +from google.cloud.bigtable.data.execute_query.metadata import ( + Metadata, + SqlType, +) +from google.cloud.bigtable.data.execute_query.values import ( + ExecuteQueryValueType, + QueryResultRow, + Struct, +) +from google.cloud.bigtable.data._cross_sync import CrossSync + +CrossSync.add_mapping("ExecuteQueryIterator", ExecuteQueryIteratorAsync) +CrossSync._Sync_Impl.add_mapping("ExecuteQueryIterator", ExecuteQueryIterator) + +__all__ = [ + "ExecuteQueryValueType", + "SqlType", + "QueryResultRow", + "Struct", + "Metadata", + "ExecuteQueryIteratorAsync", + "ExecuteQueryIterator", +] diff --git a/google/cloud/bigtable/data/execute_query/_async/__init__.py b/google/cloud/bigtable/data/execute_query/_async/__init__.py new file mode 100644 index 000000000..6d5e14bcf --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py new file mode 100644 index 000000000..2beda4cd6 --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -0,0 +1,315 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
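+
+# Editor's note, a hedged usage sketch (not part of the original change). Assuming
+# the async data client exposes execute_query() returning the iterator defined
+# below, a typical consumption loop looks like this (project, instance, and query
+# are placeholders):
+#
+#     from google.cloud.bigtable.data import BigtableDataClientAsync
+#
+#     async with BigtableDataClientAsync(project="my-project") as client:
+#         result = await client.execute_query(
+#             "SELECT _key, cf['col'] FROM my_table", "my-instance"
+#         )
+#         async for row in result:
+#             print(row["_key"])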
+ +from __future__ import annotations + +from typing import ( + Any, + Dict, + Optional, + Sequence, + Tuple, + TYPE_CHECKING, +) +from google.api_core import retry as retries +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper + +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor +from google.cloud.bigtable.data._helpers import ( + _attempt_timeout_generator, + _retry_exception_factory, +) +from google.cloud.bigtable.data.exceptions import ( + EarlyMetadataCallError, + InvalidExecuteQueryResponse, +) +from google.cloud.bigtable.data.execute_query.values import QueryResultRow +from google.cloud.bigtable.data.execute_query.metadata import Metadata +from google.cloud.bigtable.data.execute_query._reader import ( + _QueryResultRowReader, + _Reader, +) +from google.cloud.bigtable_v2.types.bigtable import ( + ExecuteQueryRequest as ExecuteQueryRequestPB, + ExecuteQueryResponse, +) + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + if CrossSync.is_async: + from google.cloud.bigtable.data import BigtableDataClientAsync as DataClientType + else: + from google.cloud.bigtable.data import BigtableDataClient as DataClientType + +__CROSS_SYNC_OUTPUT__ = ( + "google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator" +) + + +def _has_resume_token(response: ExecuteQueryResponse) -> bool: + response_pb = response._pb # proto-plus attribute retrieval is slow. + if response_pb.HasField("results"): + results = response_pb.results + return len(results.resume_token) > 0 + return False + + +@CrossSync.convert_class(sync_name="ExecuteQueryIterator") +class ExecuteQueryIteratorAsync: + @CrossSync.convert( + docstring_format_vars={ + "NO_LOOP": ( + "RuntimeError: if the instance is not created within an async event loop context.", + "None", + ), + "TASK_OR_THREAD": ("asyncio Tasks", "threads"), + } + ) + def __init__( + self, + client: DataClientType, + instance_id: str, + app_profile_id: Optional[str], + request_body: Dict[str, Any], + prepare_metadata: Metadata, + attempt_timeout: float | None, + operation_timeout: float, + req_metadata: Sequence[Tuple[str, str]] = (), + retryable_excs: Sequence[type[Exception]] = (), + column_info: dict[str, Message | EnumTypeWrapper] | None = None, + ) -> None: + """ + Collects responses from ExecuteQuery requests and parses them into QueryResultRows. + + **Please Note** this is not meant to be constructed directly by applications. It should always + be created via the client. The constructor is subject to change. + + It is **not thread-safe**. It should not be used by multiple {TASK_OR_THREAD}. + + Args: + client: bigtable client + instance_id: id of the instance on which the query is executed + request_body: dict representing the body of the ExecuteQueryRequest + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget + req_metadata: metadata used while sending the gRPC request + retryable_excs: a list of errors that will be retried if encountered. + column_info: dict with mappings between column names and additional column information + for protobuf deserialization. 
+        Raises:
+            {NO_LOOP}
+            :class:`ValueError` as a safeguard if data is processed in an unexpected state
+        """
+        self._table_name = None
+        self._app_profile_id = app_profile_id
+        self._client = client
+        self._instance_id = instance_id
+        self._prepare_metadata: Metadata = prepare_metadata
+        self._final_metadata: Metadata | None = None
+        self._byte_cursor = _ByteCursor()
+        self._reader: _Reader[QueryResultRow] = _QueryResultRowReader()
+        self.has_received_token = False
+        self._result_generator = self._next_impl()
+        self._register_instance_task = None
+        self._fully_consumed = False
+        self._is_closed = False
+        self._request_body = request_body
+        self._attempt_timeout_gen = _attempt_timeout_generator(
+            attempt_timeout, operation_timeout
+        )
+        self._stream = CrossSync.retry_target_stream(
+            self._make_request_with_resume_token,
+            retries.if_exception_type(*retryable_excs),
+            retries.exponential_sleep_generator(0.01, 60, multiplier=2),
+            operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+        self._req_metadata = req_metadata
+        self._column_info = column_info
+        try:
+            self._register_instance_task = CrossSync.create_task(
+                self._client._register_instance,
+                self._instance_id,
+                self.app_profile_id,
+                id(self),
+                sync_executor=self._client._executor,
+            )
+        except RuntimeError as e:
+            raise RuntimeError(
+                f"{self.__class__.__name__} must be created within an async event loop context."
+            ) from e
+
+    @property
+    def is_closed(self) -> bool:
+        """Returns True if the iterator is closed, False otherwise."""
+        return self._is_closed
+
+    @property
+    def app_profile_id(self) -> Optional[str]:
+        """Returns the app_profile_id of the iterator."""
+        return self._app_profile_id
+
+    @property
+    def table_name(self) -> Optional[str]:
+        """Returns the table_name of the iterator."""
+        return self._table_name
+
+    @CrossSync.convert
+    async def _make_request_with_resume_token(self):
+        """
+        Performs the rpc call using the correct resume token.
+        """
+        resume_token = self._byte_cursor.prepare_for_new_request()
+        request = ExecuteQueryRequestPB(
+            {
+                **self._request_body,
+                "resume_token": resume_token,
+            }
+        )
+        return await self._client._gapic_client.execute_query(
+            request,
+            timeout=next(self._attempt_timeout_gen),
+            metadata=self._req_metadata,
+            retry=None,
+        )
+
+    @CrossSync.convert
+    async def _next_impl(self) -> CrossSync.Iterator[QueryResultRow]:
+        """
+        Generator wrapping the response stream which parses the stream results
+        and returns full `QueryResultRow`s.
+        """
+        try:
+            async for response in self._stream:
+                try:
+                    # we've received a resume token, so we can finalize the metadata
+                    if self._final_metadata is None and _has_resume_token(response):
+                        self._finalize_metadata()
+
+                    batches_to_parse = self._byte_cursor.consume(response)
+                    if not batches_to_parse:
+                        continue
+                    # metadata must be set at this point since there must be a resume_token
+                    # for byte_cursor to yield data
+                    if not self.metadata:
+                        raise ValueError(
+                            "Error parsing response before finalizing metadata"
+                        )
+                    results = self._reader.consume(
+                        batches_to_parse, self.metadata, self._column_info
+                    )
+                    if results is None:
+                        continue
+
+                except ValueError as e:
+                    raise InvalidExecuteQueryResponse(
+                        "Invalid ExecuteQuery response received"
+                    ) from e
+
+                for result in results:
+                    yield result
+            # this means the stream has finished with no responses. In that case we
+            # know the latest_prepare_responses was used successfully so we can
+            # finalize the metadata
+            if self._final_metadata is None:
+                self._finalize_metadata()
+            self._fully_consumed = True
+        finally:
+            self._close_internal()
+
+    @CrossSync.convert(sync_name="__next__", replace_symbols={"__anext__": "__next__"})
+    async def __anext__(self) -> QueryResultRow:
+        """
+        Yields QueryResultRows representing the results of the query.
+
+        :raises: :class:`ValueError` as a safeguard if data is processed in an unexpected state
+        """
+        if self._is_closed:
+            raise CrossSync.StopIteration
+        return await self._result_generator.__anext__()
+
+    @CrossSync.convert(sync_name="__iter__")
+    def __aiter__(self):
+        return self
+
+    @CrossSync.convert
+    def _finalize_metadata(self) -> None:
+        """
+        Sets _final_metadata to the metadata of the latest prepare_response.
+        The iterator should call this after either the first resume token is received or the
+        stream completes successfully with no responses.
+
+        This can't be set on init because the metadata may change due to plan refresh.
+        Plan refresh isn't implemented yet, but we want functionality to stay the same when it is.
+
+        For example the following scenario for query "SELECT * FROM table":
+        - Make a request, table has one column family 'cf'
+        - Return an incomplete batch
+        - request fails with transient error
+        - Meanwhile the table has had a second column family added 'cf2'
+        - Retry the request, get an error indicating the `prepared_query` has expired
+        - Refresh the prepared_query and retry the request, the new prepared_query
+          contains both 'cf' & 'cf2'
+        - It sends a new incomplete batch and resets the old outdated batch
+        - It sends the next chunk with a checksum and resume_token, closing the batch.
+        In this case we need to use the updated schema from the refreshed prepare request.
+        """
+        self._final_metadata = self._prepare_metadata
+
+    @property
+    def metadata(self) -> Metadata:
+        """
+        Returns query metadata from the server.
+
+        Metadata will not be set until the first row has been yielded or a response
+        with no rows completes.
+
+        raises: :class:`EarlyMetadataCallError` when called before the first row has been returned
+            or the iterator has completed with no rows in the response.
+        """
+        if not self._final_metadata:
+            raise EarlyMetadataCallError()
+        return self._final_metadata
+
+    @CrossSync.convert
+    async def close(self) -> None:
+        """
+        Cancel all background tasks. Should be called after all rows were processed.
+
+        Called automatically by the iterator
+
+        :raises: :class:`ValueError` if called in an invalid state
+        """
+        # this doesn't need to be async anymore but we wrap the sync api to avoid a breaking
+        # change
+        self._close_internal()
+
+    def _close_internal(self) -> None:
+        if self._is_closed:
+            return
+        # Throw an error if the iterator has been successfully consumed but there is
+        # still buffered data
+        if self._fully_consumed and not self._byte_cursor.empty():
+            raise ValueError("Unexpected buffered data at end of ExecuteQuery request")
+        self._is_closed = True
+        if self._register_instance_task is not None:
+            self._register_instance_task.cancel()
+        self._client._remove_instance_registration(
+            self._instance_id, self.app_profile_id, id(self)
+        )
diff --git a/google/cloud/bigtable/data/execute_query/_byte_cursor.py b/google/cloud/bigtable/data/execute_query/_byte_cursor.py
new file mode 100644
index 000000000..16eacbe9b
--- /dev/null
+++ b/google/cloud/bigtable/data/execute_query/_byte_cursor.py
@@ -0,0 +1,123 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional
+
+from google.cloud.bigtable.data.execute_query._checksum import _CRC32C
+from google.cloud.bigtable_v2 import ExecuteQueryResponse
+
+
+class _ByteCursor:
+    """
+    Buffers bytes from `ExecuteQuery` responses until resume_token is received or end-of-stream
+    is reached. :class:`google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse` obtained from
+    the server should be passed to the ``consume`` method and its non-None results should be passed
+    to appropriate :class:`google.cloud.bigtable.execute_query_reader._Reader` for parsing gathered
+    bytes.
+
+    This class consumes data obtained externally to be usable in both sync and async clients.
+
+    See :class:`google.cloud.bigtable.execute_query_reader._Reader` for more context.
+    """
+
+    def __init__(self):
+        self._batch_buffer = bytearray()
+        self._batches: List[bytes] = []
+        self._resume_token = None
+
+    def reset(self):
+        self._batch_buffer = bytearray()
+        self._batches = []
+
+    def prepare_for_new_request(self):
+        """
+        Prepares this ``_ByteCursor`` for retrying an ``ExecuteQuery`` request.
+
+        Clears internal buffers of this ``_ByteCursor`` and returns the last received
+        ``resume_token`` to be used in the retried request.
+
+        This is the only method that returns ``resume_token`` to the user.
+        Returning the token to the user is tightly coupled with clearing internal
+        buffers, to prevent an accidental retry without clearing the state, which
+        would cause invalid results. The ``resume_token`` is not needed in other
+        cases, so there is no separate getter for it.
+
+        Returns:
+            bytes: Last received resume_token.
+ """ + # The first response of any retried stream will always contain reset, so + # this isn't actually necessary, but we do it for safety + self.reset() + return self._resume_token + + def empty(self) -> bool: + return not self._batch_buffer and not self._batches + + def consume(self, response: ExecuteQueryResponse) -> Optional[List[bytes]]: + """ + Reads results bytes from an ``ExecuteQuery`` response and adds them to a buffer. + + If the response contains a ``resume_token``: + - the ``resume_token`` is saved in this ``_ByteCursor``, and + - internal buffers are flushed and returned to the caller. + + ``resume_token`` is not available directly, but can be retrieved by calling + :meth:`._ByteCursor.prepare_for_new_request` when preparing to retry a request. + + Args: + response (google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse): + Response obtained from the stream. + + Returns: + bytes or None: List of bytes if buffers were flushed or None otherwise. + Each element in the list represents the bytes of a `ProtoRows` message. + + Raises: + ValueError: If provided ``ExecuteQueryResponse`` is not valid + or contains bytes representing response of a different kind than previously + processed responses. + """ + response_pb = response._pb # proto-plus attribute retrieval is slow. + + if response_pb.HasField("results"): + results = response_pb.results + if results.reset: + self.reset() + if results.HasField("proto_rows_batch"): + self._batch_buffer.extend(results.proto_rows_batch.batch_data) + # Note that 0 is a valid checksum so we must check for field presence + if results.HasField("batch_checksum"): + expected_checksum = results.batch_checksum + checksum = _CRC32C.checksum(self._batch_buffer) + if expected_checksum != checksum: + raise ValueError( + f"Unexpected checksum mismatch. Expected: {expected_checksum}, got: {checksum}" + ) + # We have a complete batch so we move it to batches and reset the + # batch_buffer + self._batches.append(memoryview(self._batch_buffer)) + self._batch_buffer = bytearray() + + if results.resume_token: + self._resume_token = results.resume_token + + if self._batches: + if self._batch_buffer: + raise ValueError("Unexpected resume_token without checksum") + return_value = self._batches + self._batches = [] + return return_value + else: + raise ValueError(f"Unexpected ExecuteQueryResponse: {response}") + return None diff --git a/google/cloud/bigtable/data/execute_query/_checksum.py b/google/cloud/bigtable/data/execute_query/_checksum.py new file mode 100644 index 000000000..b45a164d5 --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/_checksum.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+
+with warnings.catch_warnings(record=True) as import_warning:
+    import google_crc32c  # type: ignore
+
+
+class _CRC32C(object):
+    """
+    Wrapper around the ``google_crc32c`` library
+    """
+
+    warn_emitted = False
+
+    @classmethod
+    def checksum(cls, val: bytearray) -> int:
+        """
+        Returns the crc32c checksum of the data.
+        """
+        if import_warning and not cls.warn_emitted:
+            cls.warn_emitted = True
+            warnings.warn(
+                "Using pure python implementation of `google-crc32c` for ExecuteQuery response "
+                "validation. This is significantly slower than the c extension. If possible, "
+                "run in an environment that supports the c extension.",
+                RuntimeWarning,
+            )
+        memory_view = memoryview(val)
+        return google_crc32c.value(bytes(memory_view))
diff --git a/google/cloud/bigtable/data/execute_query/_parameters_formatting.py b/google/cloud/bigtable/data/execute_query/_parameters_formatting.py
new file mode 100644
index 000000000..ed7e946e8
--- /dev/null
+++ b/google/cloud/bigtable/data/execute_query/_parameters_formatting.py
@@ -0,0 +1,155 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+from typing import Any, Dict, Optional
+
+from google.api_core.datetime_helpers import DatetimeWithNanoseconds
+
+from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed
+from google.cloud.bigtable.data.execute_query.metadata import SqlType
+from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType
+from google.cloud.bigtable_v2.types.data import Value
+
+
+def _format_execute_query_params(
+    params: Optional[Dict[str, ExecuteQueryValueType]],
+    parameter_types: Optional[Dict[str, SqlType.Type]],
+) -> Dict[str, Value]:
+    """
+    Takes a dictionary of param_name -> param_value and optionally parameter types.
+    If the parameter types are not provided, this function tries to infer them.
+
+    Args:
+        params (Optional[Dict[str, ExecuteQueryValueType]]): mapping from parameter names
+            like they appear in query (without @ at the beginning) to their values.
+            Only values of type ExecuteQueryValueType are permitted.
+        parameter_types (Optional[Dict[str, SqlType.Type]]): mapping of parameter names
+            to their types.
+
+    Raises:
+        ValueError: raised when parameter types cannot be inferred and were not
+            provided explicitly.
+
+    Returns:
+        dictionary parsable to a protobuf representing parameters as defined
+        in ExecuteQueryRequest.params
+    """
+    if not params:
+        return {}
+    parameter_types = parameter_types or {}
+
+    result_values = {}
+    for key, value in params.items():
+        user_provided_type = parameter_types.get(key)
+        try:
+            if user_provided_type:
+                if not isinstance(user_provided_type, SqlType.Type):
+                    raise ValueError(
+                        f"Parameter type for {key} should be provided as an instance of SqlType.Type subclass."
+                    )
+                    )
+                param_type = user_provided_type
+            else:
+                param_type = _detect_type(value)
+
+            value_pb_dict = _convert_value_to_pb_value_dict(value, param_type)
+        except ValueError as err:
+            raise ValueError(f"Error when parsing parameter {key}") from err
+        result_values[key] = value_pb_dict
+
+    return result_values
+
+
+def _to_param_types(
+    params: Optional[Dict[str, ExecuteQueryValueType]],
+    param_types: Optional[Dict[str, SqlType.Type]],
+) -> Dict[str, Dict[str, Any]]:
+    """
+    Takes the params and user-supplied types and creates a param_type dict for the PrepareQuery API
+
+    Args:
+        params: Dict of param name to param value
+        param_types: Dict of param name to param type for params with types that cannot be inferred
+
+    Returns:
+        Dict containing the param name and type for each parameter
+    """
+    if params is None:
+        return {}
+    formatted_types = {}
+    for param_key, param_value in params.items():
+        if param_types and param_key in param_types:
+            formatted_types[param_key] = param_types[param_key]._to_type_pb_dict()
+        else:
+            formatted_types[param_key] = _detect_type(param_value)._to_type_pb_dict()
+    return formatted_types
+
+
+def _convert_value_to_pb_value_dict(
+    value: ExecuteQueryValueType, param_type: SqlType.Type
+) -> Any:
+    """
+    Takes a value and converts it to a dictionary parsable to a protobuf.
+
+    Args:
+        value (ExecuteQueryValueType): value
+        param_type (SqlType.Type): object describing which ExecuteQuery type the value represents.
+
+    Returns:
+        dictionary parsable to a protobuf.
+    """
+    # type field will be set only in top-level Value.
+    value_dict = param_type._to_value_pb_dict(value)
+    value_dict["type_"] = param_type._to_type_pb_dict()
+    return value_dict
+
+
+_TYPES_TO_TYPE_DICTS = [
+    (bytes, SqlType.Bytes()),
+    (str, SqlType.String()),
+    (bool, SqlType.Bool()),
+    (int, SqlType.Int64()),
+    (DatetimeWithNanoseconds, SqlType.Timestamp()),
+    (datetime.datetime, SqlType.Timestamp()),
+    (datetime.date, SqlType.Date()),
+]
+
+
+def _detect_type(value: ExecuteQueryValueType) -> SqlType.Type:
+    """
+    Infers the ExecuteQuery type based on the value. Raises
+    ParameterTypeInferenceFailed if the type is ambiguous or cannot be inferred.
+    """
+    if value is None:
+        raise ParameterTypeInferenceFailed(
+            "Cannot infer type of None, please provide the type manually."
+        )
+
+    if isinstance(value, list):
+        raise ParameterTypeInferenceFailed(
+            "Cannot infer type of ARRAY parameters, please provide the type manually."
+        )
+
+    if isinstance(value, float):
+        raise ParameterTypeInferenceFailed(
+            "Cannot infer type of float, must specify either FLOAT32 or FLOAT64 type manually."
+        )
+
+    for field_type, type_dict in _TYPES_TO_TYPE_DICTS:
+        if isinstance(value, field_type):
+            return type_dict
+
+    raise ParameterTypeInferenceFailed(
+        f"Cannot infer type of {type(value).__name__}, please provide the type manually."
    )
diff --git a/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py b/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
new file mode 100644
index 000000000..a43539e55
--- /dev/null
+++ b/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py
@@ -0,0 +1,265 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
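As a quick illustration of the inference rules in `_detect_type`: ints, strings, bytes, bools, timestamps, and dates are inferred automatically, while floats, arrays, and `None` must be typed explicitly. A hedged sketch (the parameter names are invented for the example):

```python
from google.cloud.bigtable.data.execute_query.metadata import SqlType

params = {"user_id": 42, "score": 1.5}
# int is inferred as Int64; float is ambiguous (Float32 vs Float64),
# so its type must be spelled out explicitly:
param_types = {"score": SqlType.Float64()}
```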
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +from typing import Any, Callable, Dict, Type, Optional, Union + +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper +from google.cloud.bigtable.data.execute_query.values import Struct +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable_v2 import Value as PBValue +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +_REQUIRED_PROTO_FIELDS = { + SqlType.Bytes: "bytes_value", + SqlType.String: "string_value", + SqlType.Int64: "int_value", + SqlType.Float32: "float_value", + SqlType.Float64: "float_value", + SqlType.Bool: "bool_value", + SqlType.Timestamp: "timestamp_value", + SqlType.Date: "date_value", + SqlType.Struct: "array_value", + SqlType.Array: "array_value", + SqlType.Map: "array_value", + SqlType.Proto: "bytes_value", + SqlType.Enum: "int_value", +} + + +def _parse_array_type( + value: PBValue, + metadata_type: SqlType.Array, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> list[Any]: + """ + used for parsing an array represented as a protobuf to a python list. + """ + return list( + map( + lambda val: _parse_pb_value_to_python_value( + val, metadata_type.element_type, column_name, column_info + ), + value.array_value.values, + ) + ) + + +def _parse_map_type( + value: PBValue, + metadata_type: SqlType.Map, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> dict[Any, Any]: + """ + used for parsing a map represented as a protobuf to a python dict. + + Values of type `Map` are stored in a `Value.array_value` where each entry + is another `Value.array_value` with two elements (the key and the value, + in that order). + Normally encoded Map values won't have repeated keys, however, the client + must handle the case in which they do. If the same key appears + multiple times, the _last_ value takes precedence. 
+    """
+
+    try:
+        return dict(
+            map(
+                lambda map_entry: (
+                    _parse_pb_value_to_python_value(
+                        map_entry.array_value.values[0],
+                        metadata_type.key_type,
+                        f"{column_name}.key" if column_name is not None else None,
+                        column_info,
+                    ),
+                    _parse_pb_value_to_python_value(
+                        map_entry.array_value.values[1],
+                        metadata_type.value_type,
+                        f"{column_name}.value" if column_name is not None else None,
+                        column_info,
+                    ),
+                ),
+                value.array_value.values,
+            )
+        )
+    except IndexError:
+        raise ValueError("Invalid map entry - fewer or more than two values.")
+
+
+def _parse_struct_type(
+    value: PBValue,
+    metadata_type: SqlType.Struct,
+    column_name: str | None,
+    column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> Struct:
+    """
+    used for parsing a struct represented as a protobuf to a
+    google.cloud.bigtable.data.execute_query.Struct
+    """
+    if len(value.array_value.values) != len(metadata_type.fields):
+        raise ValueError("Mismatched lengths of values and types.")
+
+    struct = Struct()
+    for value, field in zip(value.array_value.values, metadata_type.fields):
+        field_name, field_type = field
+        nested_column_name: str | None
+        if column_name and field_name:
+            # qualify the column name for nested lookups
+            nested_column_name = f"{column_name}.{field_name}"
+        else:
+            nested_column_name = None
+        struct.add_field(
+            field_name,
+            _parse_pb_value_to_python_value(
+                value, field_type, nested_column_name, column_info
+            ),
+        )
+
+    return struct
+
+
+def _parse_timestamp_type(
+    value: PBValue,
+    metadata_type: SqlType.Timestamp,
+    column_name: str | None,
+    column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> DatetimeWithNanoseconds:
+    """
+    used for parsing a timestamp represented as a protobuf to DatetimeWithNanoseconds
+    """
+    return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value)
+
+
+def _parse_proto_type(
+    value: PBValue,
+    metadata_type: SqlType.Proto,
+    column_name: str | None,
+    column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+) -> Message | bytes:
+    """
+    Parses a serialized protobuf message into a Message object using type information
+    provided in column_info.
+
+    Args:
+        value: The value to parse, expected to have a bytes_value attribute.
+        metadata_type: The expected SQL type (Proto).
+        column_name: The name of the column.
+        column_info: (Optional) A dictionary mapping column names to their
+            corresponding Protobuf Message classes. This information is used
+            to deserialize the raw bytes.
+
+    Returns:
+        A deserialized Protobuf Message object if parsing is successful.
+        If the required type information is not found in column_info, the function
+        returns the original serialized data as bytes (value.bytes_value).
+        This fallback ensures that the raw data is still accessible.
+
+    Raises:
+        google.protobuf.message.DecodeError: If `value.bytes_value` cannot be
+            parsed as the Message type specified in `column_info`.
+ """ + if ( + column_name is not None + and column_info is not None + and column_info.get(column_name) is not None + ): + default_proto_message = column_info.get(column_name) + if isinstance(default_proto_message, Message): + proto_message = type(default_proto_message)() + proto_message.ParseFromString(value.bytes_value) + return proto_message + return value.bytes_value + + +def _parse_enum_type( + value: PBValue, + metadata_type: SqlType.Enum, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> int | str: + """ + Parses an integer value into a Protobuf enum name string using type information + provided in column_info. + + Args: + value: The value to parse, expected to have an int_value attribute. + metadata_type: The expected SQL type (Enum). + column_name: The name of the column. + column_info: (Optional) A dictionary mapping column names to their + corresponding Protobuf EnumTypeWrapper objects. This information + is used to convert the integer to an enum name. + + Returns: + A string representing the name of the enum value if conversion is successful. + If conversion fails for any reason, such as the required EnumTypeWrapper + not being found in column_info, or if an error occurs during the name lookup + (e.g., the integer is not a valid enum value), the function returns the + original integer value (value.int_value). This fallback ensures the + raw integer representation is still accessible. + """ + if ( + column_name is not None + and column_info is not None + and column_info.get(column_name) is not None + ): + proto_enum = column_info.get(column_name) + if isinstance(proto_enum, EnumTypeWrapper): + return proto_enum.Name(value.int_value) + return value.int_value + + +ParserCallable = Callable[ + [PBValue, Any, Optional[str], Optional[Dict[str, Union[Message, EnumTypeWrapper]]]], + Any, +] + +_TYPE_PARSERS: Dict[Type[SqlType.Type], ParserCallable] = { + SqlType.Timestamp: _parse_timestamp_type, + SqlType.Struct: _parse_struct_type, + SqlType.Array: _parse_array_type, + SqlType.Map: _parse_map_type, + SqlType.Proto: _parse_proto_type, + SqlType.Enum: _parse_enum_type, +} + + +def _parse_pb_value_to_python_value( + value: PBValue, + metadata_type: SqlType.Type, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> Any: + """ + used for converting the value represented as a protobufs to a python object. + """ + value_kind = value.WhichOneof("kind") + if not value_kind: + return None + + kind = type(metadata_type) + if not value.HasField(_REQUIRED_PROTO_FIELDS[kind]): + raise ValueError( + f"{_REQUIRED_PROTO_FIELDS[kind]} field for {kind.__name__} type not found in a Value." + ) + + if kind in _TYPE_PARSERS: + parser = _TYPE_PARSERS[kind] + return parser(value, metadata_type, column_name, column_info) + elif kind in _REQUIRED_PROTO_FIELDS: + field_name = _REQUIRED_PROTO_FIELDS[kind] + return getattr(value, field_name) + else: + raise ValueError(f"Unknown kind {kind}") diff --git a/google/cloud/bigtable/data/execute_query/_reader.py b/google/cloud/bigtable/data/execute_query/_reader.py new file mode 100644 index 000000000..467c2030f --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/_reader.py @@ -0,0 +1,142 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+from typing import (
+    List,
+    TypeVar,
+    Generic,
+    Iterable,
+    Optional,
+    Sequence,
+)
+from abc import ABC, abstractmethod
+from google.protobuf.message import Message
+from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper
+
+from google.cloud.bigtable_v2 import ProtoRows, Value as PBValue
+
+from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import (
+    _parse_pb_value_to_python_value,
+)
+
+from google.cloud.bigtable.helpers import batched
+
+from google.cloud.bigtable.data.execute_query.values import QueryResultRow
+from google.cloud.bigtable.data.execute_query.metadata import Metadata
+
+
+T = TypeVar("T")
+
+
+class _Reader(ABC, Generic[T]):
+    """
+    An interface for classes that consume and parse bytes returned by ``_ByteCursor``.
+    Parsed bytes should be gathered into bundles (rows or columns) of expected size
+    and converted to an appropriate type ``T`` that will be returned as a semantically
+    meaningful result to the library user by
+    :meth:`google.cloud.bigtable.instance.Instance.execute_query` or
+    :meth:`google.cloud.bigtable.data._async.client.BigtableDataClientAsync.execute_query`
+    methods.
+
+    This class consumes data obtained externally to be usable in both sync and async clients.
+
+    See :class:`google.cloud.bigtable.byte_cursor._ByteCursor` for more context.
+    """
+
+    @abstractmethod
+    def consume(
+        self,
+        batches_to_consume: List[bytes],
+        metadata: Metadata,
+        column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+    ) -> Optional[Iterable[T]]:
+        """This method receives a list of batches of bytes to be parsed as ProtoRows messages.
+        It then uses the metadata to group the values in the parsed messages into rows. Returns
+        None if batches_to_consume is empty.
+
+        Args:
+            batches_to_consume (List[bytes]): list of parsable byte batches received from the
+                :meth:`google.cloud.bigtable.byte_cursor._ByteCursor.consume`
+                method.
+            metadata: metadata used to transform values to rows
+            column_info: (Optional) dict with mappings between column names and additional column information
+                for protobuf deserialization.
+
+        Returns:
+            Iterable[T] or None: Iterable if gathered values can form one or more instances of T,
+            or None if there is not enough data to construct at least one instance of T with
+            an appropriate number of entries.
+        """
+        raise NotImplementedError
+
+
+class _QueryResultRowReader(_Reader[QueryResultRow]):
+    """
+    A :class:`._Reader` consuming bytes representing
+    :class:`google.cloud.bigtable_v2.types.Type`
+    and producing :class:`google.cloud.bigtable.execute_query.QueryResultRow`.
+
+    Number of entries in each row is determined by the number of columns in
+    :class:`google.cloud.bigtable.execute_query.Metadata` obtained from
+    :class:`google.cloud.bigtable.byte_cursor._ByteCursor` passed in the constructor.
+    """
+
+    def _parse_proto_rows(self, bytes_to_parse: bytes) -> Iterable[PBValue]:
+        proto_rows = ProtoRows.pb().FromString(bytes_to_parse)
+        return proto_rows.values
+
+    def _construct_query_result_row(
+        self,
+        values: Sequence[PBValue],
+        metadata: Metadata,
+        column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+    ) -> QueryResultRow:
+        result = QueryResultRow()
+        columns = metadata.columns
+
+        assert len(values) == len(
+            columns
+        ), "This function should be called only when count of values matches count of columns."
+
+        for column, value in zip(columns, values):
+            parsed_value = _parse_pb_value_to_python_value(
+                value, column.column_type, column.column_name, column_info
+            )
+            result.add_field(column.column_name, parsed_value)
+        return result
+
+    def consume(
+        self,
+        batches_to_consume: List[bytes],
+        metadata: Metadata,
+        column_info: dict[str, Message | EnumTypeWrapper] | None = None,
+    ) -> Optional[Iterable[QueryResultRow]]:
+        num_columns = len(metadata.columns)
+        rows = []
+        for batch_bytes in batches_to_consume:
+            values = self._parse_proto_rows(batch_bytes)
+            for row_data in batched(values, n=num_columns):
+                if len(row_data) == num_columns:
+                    rows.append(
+                        self._construct_query_result_row(
+                            row_data, metadata, column_info
+                        )
+                    )
+                else:
+                    raise ValueError(
+                        "Unexpected error, received bad number of values. "
+                        f"Expected {num_columns} got {len(row_data)}."
+                    )
+
+        return rows
diff --git a/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py b/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py
new file mode 100644
index 000000000..68594d0e8
--- /dev/null
+++ b/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py
@@ -0,0 +1,259 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is automatically generated by CrossSync. Do not edit manually.
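To make the row-grouping step above concrete, here is a hedged sketch of how a flat sequence of decoded values is chunked into rows of `len(metadata.columns)`; the variable names are illustrative:

```python
# Two columns, so every two consecutive values form one row.
values = ["k1", "v1", "k2", "v2"]
num_columns = 2

rows = [
    tuple(values[i : i + num_columns])
    for i in range(0, len(values), num_columns)
]
assert rows == [("k1", "v1"), ("k2", "v2")]
```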
+ +from __future__ import annotations +from typing import Any, Dict, Optional, Sequence, Tuple, TYPE_CHECKING +from google.api_core import retry as retries +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor +from google.cloud.bigtable.data._helpers import ( + _attempt_timeout_generator, + _retry_exception_factory, +) +from google.cloud.bigtable.data.exceptions import ( + EarlyMetadataCallError, + InvalidExecuteQueryResponse, +) +from google.cloud.bigtable.data.execute_query.values import QueryResultRow +from google.cloud.bigtable.data.execute_query.metadata import Metadata +from google.cloud.bigtable.data.execute_query._reader import ( + _QueryResultRowReader, + _Reader, +) +from google.cloud.bigtable_v2.types.bigtable import ( + ExecuteQueryRequest as ExecuteQueryRequestPB, + ExecuteQueryResponse, +) +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + from google.cloud.bigtable.data import BigtableDataClient as DataClientType + + +def _has_resume_token(response: ExecuteQueryResponse) -> bool: + response_pb = response._pb + if response_pb.HasField("results"): + results = response_pb.results + return len(results.resume_token) > 0 + return False + + +class ExecuteQueryIterator: + def __init__( + self, + client: DataClientType, + instance_id: str, + app_profile_id: Optional[str], + request_body: Dict[str, Any], + prepare_metadata: Metadata, + attempt_timeout: float | None, + operation_timeout: float, + req_metadata: Sequence[Tuple[str, str]] = (), + retryable_excs: Sequence[type[Exception]] = (), + column_info: dict[str, Message | EnumTypeWrapper] | None = None, + ) -> None: + """Collects responses from ExecuteQuery requests and parses them into QueryResultRows. + + **Please Note** this is not meant to be constructed directly by applications. It should always + be created via the client. The constructor is subject to change. + + It is **not thread-safe**. It should not be used by multiple threads. + + Args: + client: bigtable client + instance_id: id of the instance on which the query is executed + request_body: dict representing the body of the ExecuteQueryRequest + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget + req_metadata: metadata used while sending the gRPC request + retryable_excs: a list of errors that will be retried if encountered. + column_info: dict with mappings between column names and additional column information + for protobuf deserialization. 
+        Raises:
+            :class:`ValueError` as a safeguard if data is processed in an unexpected state
+        """
+        self._table_name = None
+        self._app_profile_id = app_profile_id
+        self._client = client
+        self._instance_id = instance_id
+        self._prepare_metadata: Metadata = prepare_metadata
+        self._final_metadata: Metadata | None = None
+        self._byte_cursor = _ByteCursor()
+        self._reader: _Reader[QueryResultRow] = _QueryResultRowReader()
+        self.has_received_token = False
+        self._result_generator = self._next_impl()
+        self._register_instance_task = None
+        self._fully_consumed = False
+        self._is_closed = False
+        self._request_body = request_body
+        self._attempt_timeout_gen = _attempt_timeout_generator(
+            attempt_timeout, operation_timeout
+        )
+        self._stream = CrossSync._Sync_Impl.retry_target_stream(
+            self._make_request_with_resume_token,
+            retries.if_exception_type(*retryable_excs),
+            retries.exponential_sleep_generator(0.01, 60, multiplier=2),
+            operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+        self._req_metadata = req_metadata
+        self._column_info = column_info
+        try:
+            self._register_instance_task = CrossSync._Sync_Impl.create_task(
+                self._client._register_instance,
+                self._instance_id,
+                self.app_profile_id,
+                id(self),
+                sync_executor=self._client._executor,
+            )
+        except RuntimeError as e:
+            raise RuntimeError(
+                f"{self.__class__.__name__} must be created within an async event loop context."
+            ) from e
+
+    @property
+    def is_closed(self) -> bool:
+        """Returns True if the iterator is closed, False otherwise."""
+        return self._is_closed
+
+    @property
+    def app_profile_id(self) -> Optional[str]:
+        """Returns the app_profile_id of the iterator."""
+        return self._app_profile_id
+
+    @property
+    def table_name(self) -> Optional[str]:
+        """Returns the table_name of the iterator."""
+        return self._table_name
+
+    def _make_request_with_resume_token(self):
+        """performs the rpc call using the correct resume token."""
+        resume_token = self._byte_cursor.prepare_for_new_request()
+        request = ExecuteQueryRequestPB(
+            {**self._request_body, "resume_token": resume_token}
+        )
+        return self._client._gapic_client.execute_query(
+            request,
+            timeout=next(self._attempt_timeout_gen),
+            metadata=self._req_metadata,
+            retry=None,
+        )
+
+    def _next_impl(self) -> CrossSync._Sync_Impl.Iterator[QueryResultRow]:
+        """Generator wrapping the response stream which parses the stream results
+        and returns full `QueryResultRow`s."""
+        try:
+            for response in self._stream:
+                try:
+                    if self._final_metadata is None and _has_resume_token(response):
+                        self._finalize_metadata()
+                    batches_to_parse = self._byte_cursor.consume(response)
+                    if not batches_to_parse:
+                        continue
+                    if not self.metadata:
+                        raise ValueError(
+                            "Error parsing response before finalizing metadata"
+                        )
+                    results = self._reader.consume(
+                        batches_to_parse, self.metadata, self._column_info
+                    )
+                    if results is None:
+                        continue
+                except ValueError as e:
+                    raise InvalidExecuteQueryResponse(
+                        "Invalid ExecuteQuery response received"
+                    ) from e
+                for result in results:
+                    yield result
+            if self._final_metadata is None:
+                self._finalize_metadata()
+            self._fully_consumed = True
+        finally:
+            self._close_internal()
+
+    def __next__(self) -> QueryResultRow:
+        """Yields QueryResultRows representing the results of the query.
+
+        :raises: :class:`ValueError` as a safeguard if data is processed in an unexpected state
+        """
+        if self._is_closed:
+            raise CrossSync._Sync_Impl.StopIteration
+        return self._result_generator.__next__()
+
+    def __iter__(self):
+        return self
+
+    def _finalize_metadata(self) -> None:
+        """Sets _final_metadata to the metadata of the latest prepare_response.
+        The iterator should call this after either the first resume token is received or the
+        stream completes successfully with no responses.
+
+        This can't be set on init because the metadata will be able to change due to plan refresh.
+        Plan refresh isn't implemented yet, but we want functionality to stay the same when it is.
+
+        For example the following scenario for query "SELECT * FROM table":
+        - Make a request, table has one column family 'cf'
+        - Return an incomplete batch
+        - request fails with transient error
+        - Meanwhile the table has had a second column family added 'cf2'
+        - Retry the request, get an error indicating the `prepared_query` has expired
+        - Refresh the prepared_query and retry the request, the new prepared_query
+          contains both 'cf' & 'cf2'
+        - It sends a new incomplete batch and resets the old outdated batch
+        - It sends the next chunk with a checksum and resume_token, closing the batch.
+        In this case we need to use the updated schema from the refreshed prepare request."""
+        self._final_metadata = self._prepare_metadata
+
+    @property
+    def metadata(self) -> Metadata:
+        """Returns query metadata from the server.
+
+        Metadata will not be set until the first row has been yielded or a response with no rows
+        completes.
+
+        raises: :class:`EarlyMetadataCallError` when called before the first row has been returned
+        or the iterator has completed with no rows in the response."""
+        if not self._final_metadata:
+            raise EarlyMetadataCallError()
+        return self._final_metadata
+
+    def close(self) -> None:
+        """Cancel all background tasks. Should be called after all rows were processed.
+
+        Called automatically by the iterator.
+
+        :raises: :class:`ValueError` if called in an invalid state
+        """
+        self._close_internal()
+
+    def _close_internal(self) -> None:
+        if self._is_closed:
+            return
+        if self._fully_consumed and (not self._byte_cursor.empty()):
+            raise ValueError("Unexpected buffered data at end of executeQuery request")
+        self._is_closed = True
+        if self._register_instance_task is not None:
+            self._register_instance_task.cancel()
+        self._client._remove_instance_registration(
+            self._instance_id, self.app_profile_id, id(self)
+        )
diff --git a/google/cloud/bigtable/data/execute_query/metadata.py b/google/cloud/bigtable/data/execute_query/metadata.py
new file mode 100644
index 000000000..74b6cb836
--- /dev/null
+++ b/google/cloud/bigtable/data/execute_query/metadata.py
@@ -0,0 +1,425 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
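For orientation, a hedged usage sketch of the iterator above. It assumes a `BigtableDataClient` entry point named `execute_query` that returns this iterator; the table and column names are invented for the example:

```python
# Rows are yielded lazily as the stream is consumed; fields on each
# QueryResultRow are addressable by name or index.
for row in client.execute_query(
    "SELECT _key, cf['col'] AS col FROM my_table",
    instance_id="my-instance",
):
    print(row["_key"], row["col"])
```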
+ +""" +This module provides the SqlType class used for specifying types in +ExecuteQuery and some utilities. + +The SqlTypes are used in Metadata returned by the ExecuteQuery operation as well +as for specifying query parameter types explicitly. +""" + +from collections import defaultdict +import datetime +from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import date_pb2 # type: ignore + +from google.cloud.bigtable.data.execute_query.values import _NamedList +from google.cloud.bigtable_v2 import ResultSetMetadata +from google.cloud.bigtable_v2 import Type as PBType + + +class SqlType: + """ + Classes denoting types of values returned by Bigtable's ExecuteQuery operation. + + Used in :class:`.Metadata`. + """ + + class Type: + expected_type: Optional[type] = None + value_pb_dict_field_name: Optional[str] = None + type_field_name: Optional[str] = None + + @classmethod + def from_pb_type(cls, pb_type: Optional[PBType] = None): + return cls() + + def _to_type_pb_dict(self) -> Dict[str, Any]: + if not self.type_field_name: + raise NotImplementedError( + "Fill in expected_type and value_pb_dict_field_name" + ) + + return {self.type_field_name: {}} + + def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: + if self.expected_type is None or self.value_pb_dict_field_name is None: + raise NotImplementedError( + "Fill in expected_type and value_pb_dict_field_name" + ) + + if value is None: + return {} + + if not isinstance(value, self.expected_type): + raise ValueError( + f"Expected query parameter of type {self.expected_type.__name__}, got {type(value).__name__}" + ) + + return {self.value_pb_dict_field_name: value} + + def __eq__(self, other): + return isinstance(other, type(self)) + + def __str__(self) -> str: + return self.__class__.__name__ + + def __repr__(self) -> str: + return self.__str__() + + class Struct(_NamedList[Type], Type): + """Struct SQL type.""" + + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Struct": + if type_pb is None: + raise ValueError("missing required argument type_pb") + fields: List[Tuple[Optional[str], SqlType.Type]] = [] + for field in type_pb.struct_type.fields: + fields.append((field.field_name, _pb_type_to_metadata_type(field.type))) + return cls(fields) + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Struct is not supported as a query parameter") + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Struct is not supported as a query parameter") + + def __eq__(self, other: object): + # Cannot use super() here - we'd either have to: + # - call super() in these base classes, which would in turn call Object.__eq__ + # to compare objects by identity and return a False, or + # - do not call super() in these base classes, which would result in calling only + # one of the __eq__ methods (a super() in the base class would be required to call the other one), or + # - call super() in only one of the base classes, but that would be error prone and changing + # the order of base classes would introduce unexpected behaviour. 
+ # we also have to disable mypy because it doesn't see that SqlType.Struct == _NamedList[Type] + return SqlType.Type.__eq__(self, other) and _NamedList.__eq__(self, other) # type: ignore + + def __str__(self): + return super(_NamedList, self).__str__() + + class Array(Type): + """Array SQL type.""" + + def __init__(self, element_type: "SqlType.Type"): + if isinstance(element_type, SqlType.Array): + raise ValueError("Arrays of arrays are not supported.") + if isinstance(element_type, SqlType.Map): + raise ValueError("Arrays of Maps are not supported.") + self._element_type = element_type + + @property + def element_type(self): + return self._element_type + + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Array": + if type_pb is None: + raise ValueError("missing required argument type_pb") + return cls(_pb_type_to_metadata_type(type_pb.array_type.element_type)) + + def _to_value_pb_dict(self, value: Any): + if value is None: + return {} + + return { + "array_value": { + "values": [ + self.element_type._to_value_pb_dict(entry) for entry in value + ] + } + } + + def _to_type_pb_dict(self) -> Dict[str, Any]: + return { + "array_type": {"element_type": self.element_type._to_type_pb_dict()} + } + + def __eq__(self, other): + return super().__eq__(other) and self.element_type == other.element_type + + def __str__(self) -> str: + return f"{self.__class__.__name__}<{str(self.element_type)}>" + + class Map(Type): + """Map SQL type.""" + + def __init__(self, key_type: "SqlType.Type", value_type: "SqlType.Type"): + self._key_type = key_type + self._value_type = value_type + + @property + def key_type(self): + return self._key_type + + @property + def value_type(self): + return self._value_type + + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Map": + if type_pb is None: + raise ValueError("missing required argument type_pb") + return cls( + _pb_type_to_metadata_type(type_pb.map_type.key_type), + _pb_type_to_metadata_type(type_pb.map_type.value_type), + ) + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Map is not supported as a query parameter") + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Map is not supported as a query parameter") + + def __eq__(self, other): + return ( + super().__eq__(other) + and self.key_type == other.key_type + and self.value_type == other.value_type + ) + + def __str__(self) -> str: + return ( + f"{self.__class__.__name__}<" + f"{str(self._key_type)},{str(self._value_type)}>" + ) + + class Bytes(Type): + """Bytes SQL type.""" + + expected_type = bytes + value_pb_dict_field_name = "bytes_value" + type_field_name = "bytes_type" + + class String(Type): + """String SQL type.""" + + expected_type = str + value_pb_dict_field_name = "string_value" + type_field_name = "string_type" + + class Int64(Type): + """Int64 SQL type.""" + + expected_type = int + value_pb_dict_field_name = "int_value" + type_field_name = "int64_type" + + class Float64(Type): + """Float64 SQL type.""" + + expected_type = float + value_pb_dict_field_name = "float_value" + type_field_name = "float64_type" + + class Float32(Type): + """Float32 SQL type.""" + + expected_type = float + value_pb_dict_field_name = "float_value" + type_field_name = "float32_type" + + class Bool(Type): + """Bool SQL type.""" + + expected_type = bool + value_pb_dict_field_name = "bool_value" + type_field_name = "bool_type" + + class Timestamp(Type): + """ + Timestamp SQL type. 
+
+        Timestamp supports :class:`DatetimeWithNanoseconds` but Bigtable SQL does
+        not currently support nanosecond precision. We support this for potential
+        compatibility in the future. Nanoseconds are currently ignored.
+        """
+
+        type_field_name = "timestamp_type"
+        expected_types = (
+            datetime.datetime,
+            DatetimeWithNanoseconds,
+        )
+
+        def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]:
+            if value is None:
+                return {}
+
+            if not isinstance(value, self.expected_types):
+                raise ValueError(
+                    f"Expected one of {', '.join((_type.__name__ for _type in self.expected_types))}"
+                )
+
+            if isinstance(value, DatetimeWithNanoseconds):
+                return {"timestamp_value": value.timestamp_pb()}
+            else:  # value must be an instance of datetime.datetime
+                ts = timestamp_pb2.Timestamp()
+                ts.FromDatetime(value)
+                return {"timestamp_value": ts}
+
+    class Date(Type):
+        """Date SQL type."""
+
+        type_field_name = "date_type"
+        expected_type = datetime.date
+
+        def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]:
+            if value is None:
+                return {}
+
+            if not isinstance(value, self.expected_type):
+                raise ValueError(
+                    f"Expected query parameter of type {self.expected_type.__name__}, got {type(value).__name__}"
+                )
+
+            return {
+                "date_value": date_pb2.Date(
+                    year=value.year,
+                    month=value.month,
+                    day=value.day,
+                )
+            }
+
+    class Proto(Type):
+        """Proto SQL type."""
+
+        type_field_name = "proto_type"
+
+        def _to_value_pb_dict(self, value: Any):
+            raise NotImplementedError("Proto is not supported as a query parameter")
+
+        def _to_type_pb_dict(self) -> Dict[str, Any]:
+            raise NotImplementedError("Proto is not supported as a query parameter")
+
+    class Enum(Type):
+        """Enum SQL type."""
+
+        type_field_name = "enum_type"
+
+        def _to_value_pb_dict(self, value: Any):
+            raise NotImplementedError("Enum is not supported as a query parameter")
+
+        def _to_type_pb_dict(self) -> Dict[str, Any]:
+            raise NotImplementedError("Enum is not supported as a query parameter")
+
+
+class Metadata:
+    """
+    Metadata class for the ExecuteQuery operation.
+
+    Args:
+        columns (List[Tuple[Optional[str], SqlType.Type]]): List of column
+            metadata tuples. Each tuple contains the column name and the column
+            type.
+    """
+
+    class Column:
+        def __init__(self, column_name: Optional[str], column_type: SqlType.Type):
+            self._column_name = column_name
+            self._column_type = column_type
+
+        @property
+        def column_name(self) -> Optional[str]:
+            return self._column_name
+
+        @property
+        def column_type(self) -> SqlType.Type:
+            return self._column_type
+
+    @property
+    def columns(self) -> List[Column]:
+        return self._columns
+
+    def __init__(
+        self, columns: Optional[List[Tuple[Optional[str], SqlType.Type]]] = None
+    ):
+        self._columns: List[Metadata.Column] = []
+        self._column_indexes: Dict[str, List[int]] = defaultdict(list)
+        self._duplicate_names: Set[str] = set()
+
+        if columns:
+            for column_name, column_type in columns:
+                if column_name is not None:
+                    if column_name in self._column_indexes:
+                        self._duplicate_names.add(column_name)
+                    self._column_indexes[column_name].append(len(self._columns))
+                self._columns.append(Metadata.Column(column_name, column_type))
+
+    def __getitem__(self, index_or_name: Union[str, int]) -> Column:
+        if isinstance(index_or_name, str):
+            if index_or_name in self._duplicate_names:
+                raise KeyError(
+                    f"Ambiguous column name: '{index_or_name}', use index instead."
+                    f" Field present on indexes {', '.join(map(str, self._column_indexes[index_or_name]))}."
+ ) + if index_or_name not in self._column_indexes: + raise KeyError(f"No such column: {index_or_name}") + index = self._column_indexes[index_or_name][0] + else: + index = index_or_name + return self._columns[index] + + def __len__(self): + return len(self._columns) + + def __str__(self) -> str: + columns_str = ", ".join([str(column) for column in self._columns]) + return f"{self.__class__.__name__}([{columns_str}])" + + def __repr__(self) -> str: + return self.__str__() + + +def _pb_metadata_to_metadata_types( + metadata_pb: ResultSetMetadata, +) -> Metadata: + if "proto_schema" in metadata_pb: + fields: List[Tuple[Optional[str], SqlType.Type]] = [] + if not metadata_pb.proto_schema.columns: + raise ValueError("Invalid empty ResultSetMetadata received.") + for column_metadata in metadata_pb.proto_schema.columns: + fields.append( + (column_metadata.name, _pb_type_to_metadata_type(column_metadata.type)) + ) + return Metadata(fields) + raise ValueError("Invalid ResultSetMetadata object received.") + + +_PROTO_TYPE_TO_METADATA_TYPE_FACTORY: Dict[str, Type[SqlType.Type]] = { + "bytes_type": SqlType.Bytes, + "string_type": SqlType.String, + "int64_type": SqlType.Int64, + "float32_type": SqlType.Float32, + "float64_type": SqlType.Float64, + "bool_type": SqlType.Bool, + "timestamp_type": SqlType.Timestamp, + "date_type": SqlType.Date, + "proto_type": SqlType.Proto, + "enum_type": SqlType.Enum, + "struct_type": SqlType.Struct, + "array_type": SqlType.Array, + "map_type": SqlType.Map, +} + + +def _pb_type_to_metadata_type(type_pb: PBType) -> SqlType.Type: + kind = PBType.pb(type_pb).WhichOneof("kind") + if kind in _PROTO_TYPE_TO_METADATA_TYPE_FACTORY: + return _PROTO_TYPE_TO_METADATA_TYPE_FACTORY[kind].from_pb_type(type_pb) + raise ValueError(f"Unrecognized response data type: {type_pb}") diff --git a/google/cloud/bigtable/data/execute_query/values.py b/google/cloud/bigtable/data/execute_query/values.py new file mode 100644 index 000000000..80a0bff6f --- /dev/null +++ b/google/cloud/bigtable/data/execute_query/values.py @@ -0,0 +1,123 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import defaultdict +from typing import ( + Optional, + List, + Dict, + Set, + Union, + TypeVar, + Generic, + Tuple, + Mapping, +) +from google.type import date_pb2 # type: ignore +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +T = TypeVar("T") + + +class _NamedList(Generic[T]): + """ + A class designed to store a list of elements, which can be accessed by + name or index. + This class is different from namedtuple, because namedtuple has some + restrictions on names of fields and we do not want to have them. 
+    """
+
+    _str_cls_name = "_NamedList"
+
+    def __init__(self, fields: Optional[List[Tuple[Optional[str], T]]] = None):
+        self._fields: List[Tuple[Optional[str], T]] = []
+        self._field_indexes: Dict[str, List[int]] = defaultdict(list)
+        self._duplicate_names: Set[str] = set()
+
+        if fields:
+            for field_name, field_type in fields:
+                self.add_field(field_name, field_type)
+
+    def add_field(self, name: Optional[str], value: T):
+        if name:
+            if name in self._field_indexes:
+                self._duplicate_names.add(name)
+            self._field_indexes[name].append(len(self._fields))
+        self._fields.append((name, value))
+
+    @property
+    def fields(self):
+        return self._fields
+
+    def __getitem__(self, index_or_name: Union[str, int]):
+        if isinstance(index_or_name, str):
+            if index_or_name in self._duplicate_names:
+                raise KeyError(
+                    f"Ambiguous field name: '{index_or_name}', use index instead."
+                    f" Field present on indexes {', '.join(map(str, self._field_indexes[index_or_name]))}."
+                )
+            if index_or_name not in self._field_indexes:
+                raise KeyError(f"No such field: {index_or_name}")
+            index = self._field_indexes[index_or_name][0]
+        else:
+            index = index_or_name
+        return self._fields[index][1]
+
+    def __len__(self):
+        return len(self._fields)
+
+    def __eq__(self, other):
+        if not isinstance(other, _NamedList):
+            return False
+
+        return (
+            self._fields == other._fields
+            and self._field_indexes == other._field_indexes
+        )
+
+    def __str__(self) -> str:
+        fields_str = ", ".join([str(field) for field in self._fields])
+        return f"{self.__class__.__name__}([{fields_str}])"
+
+    def __repr__(self) -> str:
+        return self.__str__()
+
+
+ExecuteQueryValueType = Union[
+    int,
+    float,
+    bool,
+    bytes,
+    str,
+    # Note that Bigtable SQL does not currently support nanosecond precision,
+    # only microseconds. We use this for compatibility with potential future
+    # support
+    DatetimeWithNanoseconds,
+    date_pb2.Date,
+    "Struct",
+    List["ExecuteQueryValueType"],
+    Mapping[Union[str, int, bytes], "ExecuteQueryValueType"],
+]
+
+
+class QueryResultRow(_NamedList[ExecuteQueryValueType]):
+    """
+    Represents a single row of the result
+    """
+
+
+class Struct(_NamedList[ExecuteQueryValueType]):
+    """
+    Represents a struct value in the result
+    """
diff --git a/google/cloud/bigtable/data/mutations.py b/google/cloud/bigtable/data/mutations.py
new file mode 100644
index 000000000..f19b1e49e
--- /dev/null
+++ b/google/cloud/bigtable/data/mutations.py
@@ -0,0 +1,457 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+from typing import Any
+import time
+from dataclasses import dataclass
+from abc import ABC, abstractmethod
+from sys import getsizeof
+
+import google.cloud.bigtable_v2.types.bigtable as types_pb
+import google.cloud.bigtable_v2.types.data as data_pb
+
+from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE
+
+
+# special value for SetCell mutation timestamps.
If set, server will assign a timestamp +_SERVER_SIDE_TIMESTAMP = -1 + +# mutation entries above this should be rejected +_MUTATE_ROWS_REQUEST_MUTATION_LIMIT = 100_000 + + +class Mutation(ABC): + """ + Abstract base class for mutations. + + This class defines the interface for different types of mutations that can be + applied to Bigtable rows. + """ + + @abstractmethod + def _to_dict(self) -> dict[str, Any]: + """ + Convert the mutation to a dictionary representation. + + Returns: + dict[str, Any]: A dictionary representation of the mutation. + """ + raise NotImplementedError + + def _to_pb(self) -> data_pb.Mutation: + """ + Convert the mutation to a protobuf representation. + + Returns: + Mutation: A protobuf representation of the mutation. + """ + return data_pb.Mutation(**self._to_dict()) + + def is_idempotent(self) -> bool: + """ + Check if the mutation is idempotent + + Idempotent mutations can be safely retried on failure. + + Returns: + bool: True if the mutation is idempotent, False otherwise. + """ + return True + + def __str__(self) -> str: + """ + Return a string representation of the mutation. + + Returns: + str: A string representation of the mutation. + """ + return str(self._to_dict()) + + def size(self) -> int: + """ + Get the size of the mutation in bytes + + Returns: + int: The size of the mutation in bytes. + """ + return getsizeof(self._to_dict()) + + @classmethod + def _from_dict(cls, input_dict: dict[str, Any]) -> Mutation: + """ + Create a `Mutation` instance from a dictionary representation. + + Args: + input_dict: A dictionary representation of the mutation. + Returns: + Mutation: A Mutation instance created from the dictionary. + Raises: + ValueError: If the input dictionary is invalid or does not represent a valid mutation type. + """ + instance: Mutation | None = None + try: + if "set_cell" in input_dict: + details = input_dict["set_cell"] + instance = SetCell( + details["family_name"], + details["column_qualifier"], + details["value"], + details["timestamp_micros"], + ) + elif "delete_from_column" in input_dict: + details = input_dict["delete_from_column"] + time_range = details.get("time_range", {}) + start = time_range.get("start_timestamp_micros", None) + end = time_range.get("end_timestamp_micros", None) + instance = DeleteRangeFromColumn( + details["family_name"], details["column_qualifier"], start, end + ) + elif "delete_from_family" in input_dict: + details = input_dict["delete_from_family"] + instance = DeleteAllFromFamily(details["family_name"]) + elif "delete_from_row" in input_dict: + instance = DeleteAllFromRow() + elif "add_to_cell" in input_dict: + details = input_dict["add_to_cell"] + instance = AddToCell( + details["family_name"], + details["column_qualifier"]["raw_value"], + details["input"]["int_value"], + details["timestamp"]["raw_timestamp_micros"], + ) + except KeyError as e: + raise ValueError("Invalid mutation dictionary") from e + if instance is None: + raise ValueError("No valid mutation found") + if not issubclass(instance.__class__, cls): + raise ValueError("Mutation type mismatch") + return instance + + +class SetCell(Mutation): + """ + Mutation to set the value of a cell. + + Args: + family: The name of the column family to which the new cell belongs. + qualifier: The column qualifier of the new cell. + new_value: The value of the new cell. + timestamp_micros: The timestamp of the new cell. If `None`, + the current timestamp will be used. Timestamps will be sent with + millisecond precision. Extra precision will be truncated. 
If -1, the
+            server will assign a timestamp. Note that `SetCell` mutations with
+            server-side timestamps are non-idempotent operations and will not be retried.
+
+    Raises:
+        TypeError: If `qualifier` is not `bytes` or `str`.
+        TypeError: If `new_value` is not `bytes`, `str`, or `int`.
+        ValueError: If `timestamp_micros` is less than `_SERVER_SIDE_TIMESTAMP`.
+    """
+
+    def __init__(
+        self,
+        family: str,
+        qualifier: bytes | str,
+        new_value: bytes | str | int,
+        timestamp_micros: int | None = None,
+    ):
+        qualifier = qualifier.encode() if isinstance(qualifier, str) else qualifier
+        if not isinstance(qualifier, bytes):
+            raise TypeError("qualifier must be bytes or str")
+        if isinstance(new_value, str):
+            new_value = new_value.encode()
+        elif isinstance(new_value, int):
+            if abs(new_value) > _MAX_INCREMENT_VALUE:
+                raise ValueError(
+                    "int values must be between -2**63 and 2**63 (64-bit signed int)"
+                )
+            new_value = new_value.to_bytes(8, "big", signed=True)
+        if not isinstance(new_value, bytes):
+            raise TypeError("new_value must be bytes, str, or int")
+        if timestamp_micros is None:
+            # use current timestamp, with millisecond precision
+            timestamp_micros = time.time_ns() // 1000
+            timestamp_micros = timestamp_micros - (timestamp_micros % 1000)
+        if timestamp_micros < _SERVER_SIDE_TIMESTAMP:
+            raise ValueError(
+                f"timestamp_micros must be positive (or {_SERVER_SIDE_TIMESTAMP} for server-side timestamp)"
+            )
+        self.family = family
+        self.qualifier = qualifier
+        self.new_value = new_value
+        self.timestamp_micros = timestamp_micros
+
+    def _to_dict(self) -> dict[str, Any]:
+        return {
+            "set_cell": {
+                "family_name": self.family,
+                "column_qualifier": self.qualifier,
+                "timestamp_micros": self.timestamp_micros,
+                "value": self.new_value,
+            }
+        }
+
+    def is_idempotent(self) -> bool:
+        return self.timestamp_micros != _SERVER_SIDE_TIMESTAMP
+
+
+@dataclass
+class DeleteRangeFromColumn(Mutation):
+    """
+    Mutation to delete a range of cells from a column.
+
+    Args:
+        family: The name of the column family.
+        qualifier: The column qualifier.
+        start_timestamp_micros: The start timestamp of the range to
+            delete. `None` represents 0. Defaults to `None`.
+        end_timestamp_micros: The end timestamp of the range to
+            delete. `None` represents infinity. Defaults to `None`.
+    Raises:
+        ValueError: If `start_timestamp_micros` is greater than `end_timestamp_micros`.
+    """
+
+    family: str
+    qualifier: bytes
+    # None represents 0
+    start_timestamp_micros: int | None = None
+    # None represents infinity
+    end_timestamp_micros: int | None = None
+
+    def __post_init__(self):
+        if (
+            self.start_timestamp_micros is not None
+            and self.end_timestamp_micros is not None
+            and self.start_timestamp_micros > self.end_timestamp_micros
+        ):
+            raise ValueError("start_timestamp_micros must be <= end_timestamp_micros")
+
+    def _to_dict(self) -> dict[str, Any]:
+        timestamp_range = {}
+        if self.start_timestamp_micros is not None:
+            timestamp_range["start_timestamp_micros"] = self.start_timestamp_micros
+        if self.end_timestamp_micros is not None:
+            timestamp_range["end_timestamp_micros"] = self.end_timestamp_micros
+        return {
+            "delete_from_column": {
+                "family_name": self.family,
+                "column_qualifier": self.qualifier,
+                "time_range": timestamp_range,
+            }
+        }
+
+
+@dataclass
+class DeleteAllFromFamily(Mutation):
+    """
+    Mutation to delete all cells from a column family.
+
+    Args:
+        family_to_delete: The name of the column family to delete.
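Taken together, these mutation classes compose into batch entries. A hedged sketch of typical construction follows (the values are illustrative, and `RowMutationEntry` is defined later in this module):

```python
from google.cloud.bigtable.data.mutations import (
    DeleteRangeFromColumn,
    RowMutationEntry,
    SetCell,
)

entry = RowMutationEntry(
    row_key=b"user#123",
    mutations=[
        SetCell("profile", "name", "alice"),             # client timestamp, ms precision
        DeleteRangeFromColumn("profile", b"old_field"),  # delete all cells in the column
    ],
)
assert entry.is_idempotent()  # SetCell with a client timestamp is safe to retry
```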
+ """ + + family_to_delete: str + + def _to_dict(self) -> dict[str, Any]: + return { + "delete_from_family": { + "family_name": self.family_to_delete, + } + } + + +@dataclass +class DeleteAllFromRow(Mutation): + """ + Mutation to delete all cells from a row. + """ + + def _to_dict(self) -> dict[str, Any]: + return { + "delete_from_row": {}, + } + + +@dataclass +class AddToCell(Mutation): + """ + Adds an int64 value to an aggregate cell. The column family must be an + aggregate family and have an "int64" input type or this mutation will be + rejected. + + Note: The timestamp values are in microseconds but must match the + granularity of the table (defaults to `MILLIS`). Therefore, the given value + must be a multiple of 1000 (millisecond granularity). For example: + `1571902339435000`. + + Args: + family: The name of the column family to which the cell belongs. + qualifier: The column qualifier of the cell. + value: The value to be accumulated into the cell. + timestamp_micros: The timestamp of the cell. Must be provided for + cell aggregation to work correctly. + + + Raises: + TypeError: If `qualifier` is not `bytes` or `str`. + TypeError: If `value` is not `int`. + TypeError: If `timestamp_micros` is not `int`. + ValueError: If `value` is out of bounds for a 64-bit signed int. + ValueError: If `timestamp_micros` is less than 0. + """ + + def __init__( + self, + family: str, + qualifier: bytes | str, + value: int, + timestamp_micros: int, + ): + qualifier = qualifier.encode() if isinstance(qualifier, str) else qualifier + if not isinstance(qualifier, bytes): + raise TypeError("qualifier must be bytes or str") + if not isinstance(value, int): + raise TypeError("value must be int") + if not isinstance(timestamp_micros, int): + raise TypeError("timestamp_micros must be int") + if abs(value) > _MAX_INCREMENT_VALUE: + raise ValueError( + "int values must be between -2**63 and 2**63 (64-bit signed int)" + ) + + if timestamp_micros < 0: + raise ValueError("timestamp must be non-negative") + + self.family = family + self.qualifier = qualifier + self.value = value + self.timestamp = timestamp_micros + + def _to_dict(self) -> dict[str, Any]: + return { + "add_to_cell": { + "family_name": self.family, + "column_qualifier": {"raw_value": self.qualifier}, + "timestamp": {"raw_timestamp_micros": self.timestamp}, + "input": {"int_value": self.value}, + } + } + + def is_idempotent(self) -> bool: + return False + + +class RowMutationEntry: + """ + A single entry in a `MutateRows` request. + + This class represents a set of mutations to apply to a specific row in a + Bigtable table. + + Args: + row_key: The key of the row to mutate. + mutations: The mutation or list of mutations to apply + to the row. + + Raises: + ValueError: If `mutations` is empty or contains more than + `_MUTATE_ROWS_REQUEST_MUTATION_LIMIT` mutations. + """ + + def __init__(self, row_key: bytes | str, mutations: Mutation | list[Mutation]): + if isinstance(row_key, str): + row_key = row_key.encode("utf-8") + if isinstance(mutations, Mutation): + mutations = [mutations] + if len(mutations) == 0: + raise ValueError("mutations must not be empty") + elif len(mutations) > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT: + raise ValueError( + f"entries must have <= {_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations" + ) + self.row_key = row_key + self.mutations = tuple(mutations) + + def _to_dict(self) -> dict[str, Any]: + """ + Convert the mutation entry to a dictionary representation. 
+ + Returns: + dict[str, Any]: A dictionary representation of the mutation entry + """ + return { + "row_key": self.row_key, + "mutations": [mutation._to_dict() for mutation in self.mutations], + } + + def _to_pb(self) -> types_pb.MutateRowsRequest.Entry: + """ + Convert the mutation entry to a protobuf representation. + + Returns: + MutateRowsRequest.Entry: A protobuf representation of the mutation entry. + """ + return types_pb.MutateRowsRequest.Entry( + row_key=self.row_key, + mutations=[mutation._to_pb() for mutation in self.mutations], + ) + + def is_idempotent(self) -> bool: + """ + Check if all mutations in the entry are idempotent. + + Returns: + bool: True if all mutations in the entry are idempotent, False otherwise. + """ + return all(mutation.is_idempotent() for mutation in self.mutations) + + def size(self) -> int: + """ + Get the size of the mutation entry in bytes. + + Returns: + int: The size of the mutation entry in bytes. + """ + return getsizeof(self._to_dict()) + + @classmethod + def _from_dict(cls, input_dict: dict[str, Any]) -> RowMutationEntry: + """ + Create a `RowMutationEntry` instance from a dictionary representation. + + Args: + input_dict: A dictionary representation of the mutation entry. + + Returns: + RowMutationEntry: A RowMutationEntry instance created from the dictionary. + """ + return RowMutationEntry( + row_key=input_dict["row_key"], + mutations=[ + Mutation._from_dict(mutation) for mutation in input_dict["mutations"] + ], + ) + + +@dataclass +class _EntryWithProto: + """ + A dataclass to hold a RowMutationEntry and its corresponding proto representation. + + Used in _MutateRowsOperation to avoid repeated conversion of RowMutationEntry to proto. + """ + + entry: RowMutationEntry + proto: types_pb.MutateRowsRequest.Entry diff --git a/google/cloud/bigtable/data/read_modify_write_rules.py b/google/cloud/bigtable/data/read_modify_write_rules.py new file mode 100644 index 000000000..e4446f755 --- /dev/null +++ b/google/cloud/bigtable/data/read_modify_write_rules.py @@ -0,0 +1,112 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +import abc + +import google.cloud.bigtable_v2.types.data as data_pb + +# value must fit in 64-bit signed integer +_MAX_INCREMENT_VALUE = (1 << 63) - 1 + + +class ReadModifyWriteRule(abc.ABC): + """ + Abstract base class for read-modify-write rules. + """ + + def __init__(self, family: str, qualifier: bytes | str): + qualifier = ( + qualifier if isinstance(qualifier, bytes) else qualifier.encode("utf-8") + ) + self.family = family + self.qualifier = qualifier + + @abc.abstractmethod + def _to_dict(self) -> dict[str, str | bytes | int]: + raise NotImplementedError + + def _to_pb(self) -> data_pb.ReadModifyWriteRule: + return data_pb.ReadModifyWriteRule(**self._to_dict()) + + +class IncrementRule(ReadModifyWriteRule): + """ + Rule to increment a cell's value. + + Args: + family: + The family name of the cell to increment. 
+ qualifier: + The qualifier of the cell to increment. + increment_amount: + The amount to increment the cell's value. Must be between -2**63 and 2**63 (64-bit signed int). + Raises: + TypeError: + If increment_amount is not an integer. + ValueError: + If increment_amount is not between -2**63 and 2**63 (64-bit signed int). + """ + + def __init__(self, family: str, qualifier: bytes | str, increment_amount: int = 1): + if not isinstance(increment_amount, int): + raise TypeError("increment_amount must be an integer") + if abs(increment_amount) > _MAX_INCREMENT_VALUE: + raise ValueError( + "increment_amount must be between -2**63 and 2**63 (64-bit signed int)" + ) + super().__init__(family, qualifier) + self.increment_amount = increment_amount + + def _to_dict(self) -> dict[str, str | bytes | int]: + return { + "family_name": self.family, + "column_qualifier": self.qualifier, + "increment_amount": self.increment_amount, + } + + +class AppendValueRule(ReadModifyWriteRule): + """ + Rule to append a value to a cell's value. + + Args: + family: + The family name of the cell to append to. + qualifier: + The qualifier of the cell to append to. + append_value: + The value to append to the cell's value. + Raises: + TypeError: If append_value is not bytes or str. + """ + + def __init__(self, family: str, qualifier: bytes | str, append_value: bytes | str): + append_value = ( + append_value.encode("utf-8") + if isinstance(append_value, str) + else append_value + ) + if not isinstance(append_value, bytes): + raise TypeError("append_value must be bytes or str") + super().__init__(family, qualifier) + self.append_value = append_value + + def _to_dict(self) -> dict[str, str | bytes | int]: + return { + "family_name": self.family, + "column_qualifier": self.qualifier, + "append_value": self.append_value, + } diff --git a/google/cloud/bigtable/data/read_rows_query.py b/google/cloud/bigtable/data/read_rows_query.py new file mode 100644 index 000000000..7652bfbb9 --- /dev/null +++ b/google/cloud/bigtable/data/read_rows_query.py @@ -0,0 +1,536 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations +from typing import TYPE_CHECKING, Any +from bisect import bisect_left +from bisect import bisect_right +from collections import defaultdict +from google.cloud.bigtable.data.row_filters import RowFilter + +from google.cloud.bigtable_v2.types import RowRange as RowRangePB +from google.cloud.bigtable_v2.types import RowSet as RowSetPB +from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB + +if TYPE_CHECKING: + from google.cloud.bigtable.data import RowKeySamples + from google.cloud.bigtable.data import ShardedQuery + + +class RowRange: + """ + Represents a range of keys in a ReadRowsQuery + + Args: + start_key: The start key of the range. If empty, the range is unbounded on the left. + end_key: The end key of the range. If empty, the range is unbounded on the right. + start_is_inclusive: Whether the start key is inclusive. 
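A minimal sketch of both rule types defined in this module; the family and qualifier names are hypothetical, and in practice the rules would be passed to a read-modify-write call on a table (not shown in this diff):

```python
from google.cloud.bigtable.data.read_modify_write_rules import (
    AppendValueRule,
    IncrementRule,
)

rules = [
    IncrementRule("stats", "views"),               # defaults to +1
    AppendValueRule("stats", "history", b"|hit"),  # str values are encoded
]
# Both serialize to the field names of the ReadModifyWriteRule proto.
assert rules[0]._to_dict()["increment_amount"] == 1
assert rules[1]._to_dict()["append_value"] == b"|hit"
```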
If None, the start key is
+ inclusive.
+ end_is_inclusive: Whether the end key is inclusive. If None, the end key is not inclusive.
+ Raises:
+ ValueError: if start_key is greater than end_key
+ ValueError: if start_is_inclusive or end_is_inclusive is set when the corresponding key is None
+ ValueError: if start_key or end_key is not a string or bytes.
+ """
+
+ __slots__ = ("_pb",)
+
+ def __init__(
+ self,
+ start_key: str | bytes | None = None,
+ end_key: str | bytes | None = None,
+ start_is_inclusive: bool | None = None,
+ end_is_inclusive: bool | None = None,
+ ):
+ # convert empty key inputs to None for consistency
+ start_key = None if not start_key else start_key
+ end_key = None if not end_key else end_key
+ # check for invalid combinations of arguments
+ if start_is_inclusive is None:
+ start_is_inclusive = True
+ elif start_key is None:
+ raise ValueError("start_is_inclusive must be set with start_key")
+ if end_is_inclusive is None:
+ end_is_inclusive = False
+ elif end_key is None:
+ raise ValueError("end_is_inclusive must be set with end_key")
+ # ensure that start_key and end_key are bytes
+ if isinstance(start_key, str):
+ start_key = start_key.encode()
+ elif start_key is not None and not isinstance(start_key, bytes):
+ raise ValueError("start_key must be a string or bytes")
+ if isinstance(end_key, str):
+ end_key = end_key.encode()
+ elif end_key is not None and not isinstance(end_key, bytes):
+ raise ValueError("end_key must be a string or bytes")
+ # ensure that start_key is less than or equal to end_key
+ if start_key is not None and end_key is not None and start_key > end_key:
+ raise ValueError("start_key must be less than or equal to end_key")
+
+ init_dict = {}
+ if start_key is not None:
+ if start_is_inclusive:
+ init_dict["start_key_closed"] = start_key
+ else:
+ init_dict["start_key_open"] = start_key
+ if end_key is not None:
+ if end_is_inclusive:
+ init_dict["end_key_closed"] = end_key
+ else:
+ init_dict["end_key_open"] = end_key
+ self._pb = RowRangePB(**init_dict)
+
+ @property
+ def start_key(self) -> bytes | None:
+ """
+ Returns the start key of the range. If None, the range is unbounded on the left.
+ """
+ return self._pb.start_key_closed or self._pb.start_key_open or None
+
+ @property
+ def end_key(self) -> bytes | None:
+ """
+ Returns the end key of the range. If None, the range is unbounded on the right.
+
+ Returns:
+ bytes | None: The end key of the range, or None if the range is unbounded on the right.
+ """
+ return self._pb.end_key_closed or self._pb.end_key_open or None
+
+ @property
+ def start_is_inclusive(self) -> bool:
+ """
+ Indicates if the range is inclusive of the start key.
+
+ If the range is unbounded on the left, this will return True.
+
+ Returns:
+ bool: Whether the range is inclusive of the start key.
+ """
+ return not bool(self._pb.start_key_open)
+
+ @property
+ def end_is_inclusive(self) -> bool:
+ """
+ Indicates if the range is inclusive of the end key.
+
+ If the range is unbounded on the right, this will return True.
+
+ Returns:
+ bool: Whether the range is inclusive of the end key.
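The defaults above give the conventional half-open interval; a short sketch with hypothetical keys:

```python
from google.cloud.bigtable.data.read_rows_query import RowRange

r = RowRange(start_key="a", end_key="z")   # str keys are encoded to bytes
assert r.start_key == b"a" and r.end_key == b"z"
assert r.start_is_inclusive               # default: closed start
assert not r.end_is_inclusive             # default: open end, i.e. [a, z)
```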
+ """ + return not bool(self._pb.end_key_open) + + def _to_pb(self) -> RowRangePB: + """ + Converts this object to a protobuf + + Returns: + RowRangePB: The protobuf representation of this object + """ + return self._pb + + @classmethod + def _from_pb(cls, data: RowRangePB) -> RowRange: + """ + Creates a RowRange from a protobuf + + Args: + data (RowRangePB): The protobuf to convert + Returns: + RowRange: The converted RowRange + """ + instance = cls() + instance._pb = data + return instance + + @classmethod + def _from_dict(cls, data: dict[str, bytes | str]) -> RowRange: + """ + Creates a RowRange from a protobuf + + Args: + data (dict[str, bytes | str]): The dictionary to convert + Returns: + RowRange: The converted RowRange + """ + formatted_data = { + k: v.encode() if isinstance(v, str) else v for k, v in data.items() + } + instance = cls() + instance._pb = RowRangePB(**formatted_data) + return instance + + def __bool__(self) -> bool: + """ + Empty RowRanges (representing a full table scan) are falsy, because + they can be substituted with None. Non-empty RowRanges are truthy. + + Returns: + bool: True if the RowRange is not empty, False otherwise + """ + return bool( + self._pb.start_key_closed + or self._pb.start_key_open + or self._pb.end_key_closed + or self._pb.end_key_open + ) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, RowRange): + return NotImplemented + return self._pb == other._pb + + def __str__(self) -> str: + """ + Represent range as a string, e.g. "[b'a', b'z)" + + Unbounded start or end keys are represented as "-inf" or "+inf" + + Returns: + str: The string representation of the range + """ + left = "[" if self.start_is_inclusive else "(" + right = "]" if self.end_is_inclusive else ")" + start = repr(self.start_key) if self.start_key is not None else "-inf" + end = repr(self.end_key) if self.end_key is not None else "+inf" + return f"{left}{start}, {end}{right}" + + def __repr__(self) -> str: + args_list = [] + args_list.append(f"start_key={self.start_key!r}") + args_list.append(f"end_key={self.end_key!r}") + if self.start_is_inclusive is False: + # only show start_is_inclusive if it is different from the default + args_list.append(f"start_is_inclusive={self.start_is_inclusive}") + if self.end_is_inclusive is True and self.end_key is not None: + # only show end_is_inclusive if it is different from the default + args_list.append(f"end_is_inclusive={self.end_is_inclusive}") + return f"RowRange({', '.join(args_list)})" + + +class ReadRowsQuery: + """ + Class to encapsulate details of a read row request + + Args: + row_keys: row keys to include in the query + a query can contain multiple keys, but ranges should be preferred + row_ranges: ranges of rows to include in the query + limit: the maximum number of rows to return. 
None or 0 means no limit + default: None (no limit) + row_filter: a RowFilter to apply to the query + """ + + slots = ("_limit", "_filter", "_row_set") + + def __init__( + self, + row_keys: list[str | bytes] | str | bytes | None = None, + row_ranges: list[RowRange] | RowRange | None = None, + limit: int | None = None, + row_filter: RowFilter | None = None, + ): + if row_keys is None: + row_keys = [] + if row_ranges is None: + row_ranges = [] + if not isinstance(row_ranges, list): + row_ranges = [row_ranges] + if not isinstance(row_keys, list): + row_keys = [row_keys] + row_keys = [key.encode() if isinstance(key, str) else key for key in row_keys] + self._row_set = RowSetPB( + row_keys=row_keys, row_ranges=[r._pb for r in row_ranges] + ) + self.limit = limit or None + self.filter = row_filter + + @property + def row_keys(self) -> list[bytes]: + """ + Return the row keys in this query + + Returns: + list[bytes]: the row keys in this query + """ + return list(self._row_set.row_keys) + + @property + def row_ranges(self) -> list[RowRange]: + """ + Return the row ranges in this query + + Returns: + list[RowRange]: the row ranges in this query + """ + return [RowRange._from_pb(r) for r in self._row_set.row_ranges] + + @property + def limit(self) -> int | None: + """ + Return the maximum number of rows to return by this query + + None or 0 means no limit + + Returns: + int | None: the maximum number of rows to return by this query + """ + return self._limit or None + + @limit.setter + def limit(self, new_limit: int | None): + """ + Set the maximum number of rows to return by this query. + + None or 0 means no limit + + Args: + new_limit: the new limit to apply to this query + Raises: + ValueError: if new_limit is < 0 + """ + if new_limit is not None and new_limit < 0: + raise ValueError("limit must be >= 0") + self._limit = new_limit + + @property + def filter(self) -> RowFilter | None: + """ + Return the RowFilter applied to this query + + Returns: + RowFilter | None: the RowFilter applied to this query + """ + return self._filter + + @filter.setter + def filter(self, row_filter: RowFilter | None): + """ + Set a RowFilter to apply to this query + + Args: + row_filter: a RowFilter to apply to this query + """ + self._filter = row_filter + + def add_key(self, row_key: str | bytes): + """ + Add a row key to this query + + A query can contain multiple keys, but ranges should be preferred + + Args: + row_key: a key to add to this query + Raises: + ValueError: if an input is not a string or bytes + """ + if isinstance(row_key, str): + row_key = row_key.encode() + elif not isinstance(row_key, bytes): + raise ValueError("row_key must be string or bytes") + if row_key not in self._row_set.row_keys: + self._row_set.row_keys.append(row_key) + + def add_range( + self, + row_range: RowRange, + ): + """ + Add a range of row keys to this query. + + Args: + row_range: a range of row keys to add to this query + """ + if row_range not in self.row_ranges: + self._row_set.row_ranges.append(row_range._pb) + + def shard(self, shard_keys: RowKeySamples) -> ShardedQuery: + """ + Split this query into multiple queries that can be evenly distributed + across nodes and run in parallel + + Args: + shard_keys: a list of row keys that define the boundaries of segments. 
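Putting the query pieces together (row keys and ranges are hypothetical):

```python
from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery, RowRange

query = ReadRowsQuery(row_ranges=RowRange(start_key="user-"), limit=100)
query.add_key("user-0042")      # duplicate keys would be silently skipped
assert query.limit == 100
query.limit = 0                 # 0 normalizes to None: no limit
assert query.limit is None
```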
+
+ Returns:
+ ShardedQuery: a ShardedQuery that can be used in sharded_read_rows calls
+ Raises:
+ AttributeError: if the query contains a limit
+ """
+ if self.limit is not None:
+ raise AttributeError("Cannot shard query with a limit")
+ if len(self.row_keys) == 0 and len(self.row_ranges) == 0:
+ # empty query represents full scan
+ # ensure that we have at least one key or range
+ full_scan_query = ReadRowsQuery(
+ row_ranges=RowRange(), row_filter=self.filter
+ )
+ return full_scan_query.shard(shard_keys)
+
+ sharded_queries: dict[int, ReadRowsQuery] = defaultdict(
+ lambda: ReadRowsQuery(row_filter=self.filter)
+ )
+ # the split_points divide our key space into segments
+ # each split_point defines the last key that belongs to a segment
+ # our goal is to break up the query into subqueries that each operate in a single segment
+ split_points = [sample[0] for sample in shard_keys if sample[0]]
+
+ # handle row_keys
+ # use binary search to find the segment that each key belongs to
+ for this_key in list(self.row_keys):
+ # bisect_left: in case of exact match, pick left side (keys are inclusive ends)
+ segment_index = bisect_left(split_points, this_key)
+ sharded_queries[segment_index].add_key(this_key)
+
+ # handle row_ranges
+ for this_range in self.row_ranges:
+ # defer to _shard_range helper
+ for segment_index, added_range in self._shard_range(
+ this_range, split_points
+ ):
+ sharded_queries[segment_index].add_range(added_range)
+ # return list of queries ordered by segment index
+ # pull populated segments out of sharded_queries dict
+ keys = sorted(list(sharded_queries.keys()))
+ # return list of queries
+ return [sharded_queries[k] for k in keys]
+
+ @staticmethod
+ def _shard_range(
+ orig_range: RowRange, split_points: list[bytes]
+ ) -> list[tuple[int, RowRange]]:
+ """
+ Helper function for sharding row_range into subranges that fit into
+ segments of the key-space, determined by split_points
+
+ Args:
+ orig_range: a row range to split
+ split_points: a list of row keys that define the boundaries of segments.
+ each point represents the inclusive end of a segment
+ Returns:
+ list[tuple[int, RowRange]]: a list of tuples, containing a segment index and a new sub-range.
+ """
+ # 1. find the index of the segment the start key belongs to
+ if orig_range.start_key is None:
+ # if range is open on the left, include first segment
+ start_segment = 0
+ else:
+ # use binary search to find the segment the start key belongs to
+ # bisect method determines how we break ties when the start key matches a split point
+ # if inclusive, bisect_left keeps it in the left segment; otherwise bisect_right moves it right
+ bisect = bisect_left if orig_range.start_is_inclusive else bisect_right
+ start_segment = bisect(split_points, orig_range.start_key)
+
+ # 2. find the index of the segment the end key belongs to
+ if orig_range.end_key is None:
+ # if range is open on the right, include final segment
+ end_segment = len(split_points)
+ else:
+ # use binary search to find the segment the end key belongs to.
+ end_segment = bisect_left(
+ split_points, orig_range.end_key, lo=start_segment
+ )
+ # note: end_segment will always bisect_left, because split points represent inclusive ends
+ # whether the end_key includes the split point or not, the result is the same segment
+ # 3. create new range definitions for each segment this_range spans
+ if start_segment == end_segment:
+ # this_range is contained in a single segment.
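The tie-breaking rule in the comments above can be checked directly with plain `bisect` (the split points here are hypothetical):

```python
from bisect import bisect_left

split_points = [b"g", b"p"]   # segments: (-inf, g], (g, p], (p, +inf)
assert bisect_left(split_points, b"g") == 0  # exact match stays in the left segment
assert bisect_left(split_points, b"h") == 1
assert bisect_left(split_points, b"q") == 2
```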
+ # Add this_range to that segment's query only + return [(start_segment, orig_range)] + else: + results: list[tuple[int, RowRange]] = [] + # this_range spans multiple segments. Create a new range for each segment's query + # 3a. add new range for first segment this_range spans + # first range spans from start_key to the split_point representing the last key in the segment + last_key_in_first_segment = split_points[start_segment] + start_range = RowRange( + start_key=orig_range.start_key, + start_is_inclusive=orig_range.start_is_inclusive, + end_key=last_key_in_first_segment, + end_is_inclusive=True, + ) + results.append((start_segment, start_range)) + # 3b. add new range for last segment this_range spans + # we start the final range using the end key from of the previous segment, with is_inclusive=False + previous_segment = end_segment - 1 + last_key_before_segment = split_points[previous_segment] + end_range = RowRange( + start_key=last_key_before_segment, + start_is_inclusive=False, + end_key=orig_range.end_key, + end_is_inclusive=orig_range.end_is_inclusive, + ) + results.append((end_segment, end_range)) + # 3c. add new spanning range to all segments other than the first and last + for this_segment in range(start_segment + 1, end_segment): + prev_segment = this_segment - 1 + prev_end_key = split_points[prev_segment] + this_end_key = split_points[prev_segment + 1] + new_range = RowRange( + start_key=prev_end_key, + start_is_inclusive=False, + end_key=this_end_key, + end_is_inclusive=True, + ) + results.append((this_segment, new_range)) + return results + + def _to_pb(self, table) -> ReadRowsRequestPB: + """ + Convert this query into a dictionary that can be used to construct a + ReadRowsRequest protobuf + """ + return ReadRowsRequestPB( + app_profile_id=table.app_profile_id, + filter=self.filter._to_pb() if self.filter else None, + rows_limit=self.limit or 0, + rows=self._row_set, + **table._request_path, + ) + + def __eq__(self, other): + """ + RowRanges are equal if they have the same row keys, row ranges, + filter and limit, or if they both represent a full scan with the + same filter and limit + + Args: + other: the object to compare to + Returns: + bool: True if the objects are equal, False otherwise + """ + if not isinstance(other, ReadRowsQuery): + return False + # empty queries are equal + if len(self.row_keys) == 0 and len(other.row_keys) == 0: + this_range_empty = len(self.row_ranges) == 0 or all( + [bool(r) is False for r in self.row_ranges] + ) + other_range_empty = len(other.row_ranges) == 0 or all( + [bool(r) is False for r in other.row_ranges] + ) + if this_range_empty and other_range_empty: + return self.filter == other.filter and self.limit == other.limit + # otherwise, sets should have same sizes + if len(self.row_keys) != len(other.row_keys): + return False + if len(self.row_ranges) != len(other.row_ranges): + return False + ranges_match = all([row in other.row_ranges for row in self.row_ranges]) + return ( + self.row_keys == other.row_keys + and ranges_match + and self.filter == other.filter + and self.limit == other.limit + ) + + def __repr__(self): + return f"ReadRowsQuery(row_keys={list(self.row_keys)}, row_ranges={list(self.row_ranges)}, row_filter={self.filter}, limit={self.limit})" diff --git a/google/cloud/bigtable/data/row.py b/google/cloud/bigtable/data/row.py new file mode 100644 index 000000000..50e65a958 --- /dev/null +++ b/google/cloud/bigtable/data/row.py @@ -0,0 +1,535 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from collections import OrderedDict +from typing import Generator, overload, Any +from functools import total_ordering + +from google.cloud.bigtable_v2.types import Row as RowPB + +# Type aliases used internally for readability. +_family_type = str +_qualifier_type = bytes + + +class Row: + """ + Model class for row data returned from server + + Does not represent all data contained in the row, only data returned by a + query. + Expected to be read-only to users, and written by backend + + Can be indexed by family and qualifier to get cells in the row:: + + cells = row["family", "qualifier"] + + Args: + key: Row key + cells: List of cells in the row + """ + + __slots__ = ("row_key", "cells", "_index_data") + + def __init__( + self, + key: bytes, + cells: list[Cell], + ): + """ + Row objects are not intended to be created by users. + They are returned by the Bigtable backend. + """ + self.row_key = key + self.cells: list[Cell] = cells + # index is lazily created when needed + self._index_data: OrderedDict[ + _family_type, OrderedDict[_qualifier_type, list[Cell]] + ] | None = None + + @property + def _index( + self, + ) -> OrderedDict[_family_type, OrderedDict[_qualifier_type, list[Cell]]]: + """ + Returns an index of cells associated with each family and qualifier. + + The index is lazily created when needed + + Returns: + OrderedDict: Index of cells + """ + if self._index_data is None: + self._index_data = OrderedDict() + for cell in self.cells: + self._index_data.setdefault(cell.family, OrderedDict()).setdefault( + cell.qualifier, [] + ).append(cell) + return self._index_data + + @classmethod + def _from_pb(cls, row_pb: RowPB) -> Row: + """ + Creates a row from a protobuf representation + + Row objects are not intended to be created by users. + They are returned by the Bigtable backend. 
+ + Args: + row_pb (RowPB): Protobuf representation of the row + Returns: + Row: Row object created from the protobuf representation + """ + row_key: bytes = row_pb.key + cell_list: list[Cell] = [] + for family in row_pb.families: + for column in family.columns: + for cell in column.cells: + new_cell = Cell( + value=cell.value, + row_key=row_key, + family=family.name, + qualifier=column.qualifier, + timestamp_micros=cell.timestamp_micros, + labels=list(cell.labels) if cell.labels else None, + ) + cell_list.append(new_cell) + return cls(row_key, cells=cell_list) + + def get_cells( + self, family: str | None = None, qualifier: str | bytes | None = None + ) -> list[Cell]: + """ + Returns cells sorted in Bigtable native order: + - Family lexicographically ascending + - Qualifier ascending + - Timestamp in reverse chronological order + + If family or qualifier not passed, will include all + + Can also be accessed through indexing:: + cells = row["family", "qualifier"] + cells = row["family"] + + Args: + family: family to filter cells by + qualifier: qualifier to filter cells by + Returns: + list[Cell]: List of cells in the row matching the filter + Raises: + ValueError: If family or qualifier is not found in the row + """ + if family is None: + if qualifier is not None: + # get_cells(None, "qualifier") is not allowed + raise ValueError("Qualifier passed without family") + else: + # return all cells on get_cells() + return self.cells + if qualifier is None: + # return all cells in family on get_cells(family) + return list(self._get_all_from_family(family)) + if isinstance(qualifier, str): + qualifier = qualifier.encode("utf-8") + # return cells in family and qualifier on get_cells(family, qualifier) + if family not in self._index: + raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") + if qualifier not in self._index[family]: + raise ValueError( + f"Qualifier '{qualifier!r}' not found in family '{family}' in row '{self.row_key!r}'" + ) + return self._index[family][qualifier] + + def _get_all_from_family(self, family: str) -> Generator[Cell, None, None]: + """ + Returns all cells in the row for the family_id + + Args: + family: family to filter cells by + Yields: + Cell: cells in the row for the family_id + Raises: + ValueError: If family is not found in the row + """ + if family not in self._index: + raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") + for qualifier in self._index[family]: + yield from self._index[family][qualifier] + + def __str__(self) -> str: + """ + Human-readable string representation:: + + { + (family='fam', qualifier=b'col'): [b'value', (+1 more),], + (family='fam', qualifier=b'col2'): [b'other'], + } + + Returns: + str: Human-readable string representation of the row + """ + output = ["{"] + for family, qualifier in self._get_column_components(): + cell_list = self[family, qualifier] + line = [f" (family={family!r}, qualifier={qualifier!r}): "] + if len(cell_list) == 0: + line.append("[],") + elif len(cell_list) == 1: + line.append(f"[{cell_list[0]}],") + else: + line.append(f"[{cell_list[0]}, (+{len(cell_list) - 1} more)],") + output.append("".join(line)) + output.append("}") + return "\n".join(output) + + def __repr__(self): + cell_str_buffer = ["{"] + for family, qualifier in self._get_column_components(): + cell_list = self[family, qualifier] + repr_list = [cell._to_dict() for cell in cell_list] + cell_str_buffer.append(f" ('{family}', {qualifier!r}): {repr_list},") + cell_str_buffer.append("}") + cell_str = 
"\n".join(cell_str_buffer) + output = f"Row(key={self.row_key!r}, cells={cell_str})" + return output + + def _to_dict(self) -> dict[str, Any]: + """ + Returns a dictionary representation of the cell in the Bigtable Row + proto format + + https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#row + """ + family_list = [] + for family_name, qualifier_dict in self._index.items(): + qualifier_list = [] + for qualifier_name, cell_list in qualifier_dict.items(): + cell_dicts = [cell._to_dict() for cell in cell_list] + qualifier_list.append( + {"qualifier": qualifier_name, "cells": cell_dicts} + ) + family_list.append({"name": family_name, "columns": qualifier_list}) + return {"key": self.row_key, "families": family_list} + + # Sequence and Mapping methods + def __iter__(self): + """ + Allow iterating over all cells in the row + + Returns: + Iterator: Iterator over the cells in the row + """ + return iter(self.cells) + + def __contains__(self, item): + """ + Implements `in` operator + + Works for both cells in the internal list, and `family` or + `(family, qualifier)` pairs associated with the cells + + Args: + item: item to check for in the row + Returns: + bool: True if item is in the row, False otherwise + """ + if isinstance(item, _family_type): + return item in self._index + elif ( + isinstance(item, tuple) + and isinstance(item[0], _family_type) + and isinstance(item[1], (bytes, str)) + ): + q = item[1] if isinstance(item[1], bytes) else item[1].encode("utf-8") + return item[0] in self._index and q in self._index[item[0]] + # check if Cell is in Row + return item in self.cells + + @overload + def __getitem__( + self, + index: str | tuple[str, bytes | str], + ) -> list[Cell]: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: int) -> Cell: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: slice) -> list[Cell]: + # overload signature for type checking + pass + + def __getitem__(self, index): + """ + Implements [] indexing + + Supports indexing by family, (family, qualifier) pair, + numerical index, and index slicing + """ + if isinstance(index, _family_type): + return self.get_cells(family=index) + elif ( + isinstance(index, tuple) + and isinstance(index[0], _family_type) + and isinstance(index[1], (bytes, str)) + ): + return self.get_cells(family=index[0], qualifier=index[1]) + elif isinstance(index, int) or isinstance(index, slice): + # index is int or slice + return self.cells[index] + else: + raise TypeError( + "Index must be family_id, (family_id, qualifier), int, or slice" + ) + + def __len__(self): + """ + Returns the number of cells in the row + + Returns: + int: Number of cells in the row + """ + return len(self.cells) + + def _get_column_components(self) -> list[tuple[str, bytes]]: + """ + Returns a list of (family, qualifier) pairs associated with the cells + + Pairs can be used for indexing + + Returns: + list[tuple[str, bytes]]: List of (family, qualifier) pairs + """ + return [(f, q) for f in self._index for q in self._index[f]] + + def __eq__(self, other): + """ + Implements `==` operator + + Returns: + bool: True if rows are equal, False otherwise + """ + # for performance reasons, check row metadata + # before checking individual cells + if not isinstance(other, Row): + return False + if self.row_key != other.row_key: + return False + if len(self.cells) != len(other.cells): + return False + components = self._get_column_components() + other_components = 
other._get_column_components() + if len(components) != len(other_components): + return False + if components != other_components: + return False + for family, qualifier in components: + if len(self[family, qualifier]) != len(other[family, qualifier]): + return False + # compare individual cell lists + if self.cells != other.cells: + return False + return True + + def __ne__(self, other) -> bool: + """ + Implements `!=` operator + + Returns: + bool: True if rows are not equal, False otherwise + """ + return not self == other + + +@total_ordering +class Cell: + """ + Model class for cell data + + Does not represent all data contained in the cell, only data returned by a + query. + Expected to be read-only to users, and written by backend + + Args: + value: the byte string value of the cell + row_key: the row key of the cell + family: the family associated with the cell + qualifier: the column qualifier associated with the cell + timestamp_micros: the timestamp of the cell in microseconds + labels: the list of labels associated with the cell + """ + + __slots__ = ( + "value", + "row_key", + "family", + "qualifier", + "timestamp_micros", + "labels", + ) + + def __init__( + self, + value: bytes, + row_key: bytes, + family: str, + qualifier: bytes | str, + timestamp_micros: int, + labels: list[str] | None = None, + ): + # Cell objects are not intended to be constructed by users. + # They are returned by the Bigtable backend. + self.value = value + self.row_key = row_key + self.family = family + if isinstance(qualifier, str): + qualifier = qualifier.encode() + self.qualifier = qualifier + self.timestamp_micros = timestamp_micros + self.labels = labels if labels is not None else [] + + def __int__(self) -> int: + """ + Allows casting cell to int + Interprets value as a 64-bit big-endian signed integer, as expected by + ReadModifyWrite increment rule + + Returns: + int: Value of the cell as a 64-bit big-endian signed integer + """ + return int.from_bytes(self.value, byteorder="big", signed=True) + + def _to_dict(self) -> dict[str, Any]: + """ + Returns a dictionary representation of the cell in the Bigtable Cell + proto format + + https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#cell + + Returns: + dict: Dictionary representation of the cell + """ + cell_dict: dict[str, Any] = { + "value": self.value, + } + cell_dict["timestamp_micros"] = self.timestamp_micros + if self.labels: + cell_dict["labels"] = self.labels + return cell_dict + + def __str__(self) -> str: + """ + Allows casting cell to str + Prints encoded byte string, same as printing value directly. 
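A sketch of the integer decoding above and the native ordering implemented further below (all values hypothetical):

```python
from google.cloud.bigtable.data.row import Cell

newer = Cell((100).to_bytes(8, "big", signed=True), b"rk", "fam", b"col", 2000)
older = Cell(b"old", b"rk", "fam", b"col", 1000)
assert int(newer) == 100                         # 64-bit big-endian signed
assert sorted([older, newer]) == [newer, older]  # newest timestamp sorts first
```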
+ + Returns: + str: Encoded byte string of the value + """ + return str(self.value) + + def __repr__(self): + """ + Returns a string representation of the cell + + Returns: + str: String representation of the cell + """ + return f"Cell(value={self.value!r}, row_key={self.row_key!r}, family='{self.family}', qualifier={self.qualifier!r}, timestamp_micros={self.timestamp_micros}, labels={self.labels})" + + """For Bigtable native ordering""" + + def __lt__(self, other) -> bool: + """ + Implements `<` operator + + Args: + other: Cell to compare with + Returns: + bool: True if this cell is less than the other cell, False otherwise + Raises: + NotImplementedError: If other is not a Cell + """ + if not isinstance(other, Cell): + raise NotImplementedError + this_ordering = ( + self.family, + self.qualifier, + -self.timestamp_micros, + self.value, + self.labels, + ) + other_ordering = ( + other.family, + other.qualifier, + -other.timestamp_micros, + other.value, + other.labels, + ) + return this_ordering < other_ordering + + def __eq__(self, other) -> bool: + """ + Implements `==` operator + + Args: + other: Cell to compare with + Returns: + bool: True if cells are equal, False otherwise + """ + if not isinstance(other, Cell): + return False + return ( + self.row_key == other.row_key + and self.family == other.family + and self.qualifier == other.qualifier + and self.value == other.value + and self.timestamp_micros == other.timestamp_micros + and len(self.labels) == len(other.labels) + and all([label in other.labels for label in self.labels]) + ) + + def __ne__(self, other) -> bool: + """ + Implements `!=` operator + + Args: + other: Cell to compare with + Returns: + bool: True if cells are not equal, False otherwise + """ + return not self == other + + def __hash__(self): + """ + Implements `hash()` function to fingerprint cell + + Returns: + int: hash value of the cell + """ + return hash( + ( + self.row_key, + self.family, + self.qualifier, + self.value, + self.timestamp_micros, + tuple(self.labels), + ) + ) diff --git a/google/cloud/bigtable/data/row_filters.py b/google/cloud/bigtable/data/row_filters.py new file mode 100644 index 000000000..9f09133d5 --- /dev/null +++ b/google/cloud/bigtable/data/row_filters.py @@ -0,0 +1,968 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Filters for Google Cloud Bigtable Row classes.""" +from __future__ import annotations + +import struct + +from typing import Any, Sequence, TYPE_CHECKING, overload +from abc import ABC, abstractmethod + +from google.cloud._helpers import _microseconds_from_datetime # type: ignore +from google.cloud._helpers import _to_bytes # type: ignore +from google.cloud.bigtable_v2.types import data as data_v2_pb2 + +if TYPE_CHECKING: + # import dependencies when type checking + from datetime import datetime + +_PACK_I64 = struct.Struct(">q").pack + + +class RowFilter(ABC): + """Basic filter to apply to cells in a row. 
+ + These values can be combined via :class:`RowFilterChain`, + :class:`RowFilterUnion` and :class:`ConditionalRowFilter`. + + .. note:: + + This class is a do-nothing base class for all row filters. + """ + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. + """ + return data_v2_pb2.RowFilter(**self._to_dict()) + + @abstractmethod + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + pass + + def __repr__(self) -> str: + return f"{self.__class__.__name__}()" + + +class _BoolFilter(RowFilter, ABC): + """Row filter that uses a boolean flag. + + :type flag: bool + :param flag: An indicator if a setting is turned on or off. + """ + + def __init__(self, flag: bool): + self.flag = flag + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.flag == self.flag + + def __ne__(self, other): + return not self == other + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(flag={self.flag})" + + +class SinkFilter(_BoolFilter): + """Advanced row filter to skip parent filters. + + :type flag: bool + :param flag: ADVANCED USE ONLY. Hook for introspection into the row filter. + Outputs all cells directly to the output of the read rather + than to any parent filter. Cannot be used within the + ``predicate_filter``, ``true_filter``, or ``false_filter`` + of a :class:`ConditionalRowFilter`. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"sink": self.flag} + + +class PassAllFilter(_BoolFilter): + """Row filter equivalent to not filtering at all. + + :type flag: bool + :param flag: Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"pass_all_filter": self.flag} + + +class BlockAllFilter(_BoolFilter): + """Row filter that doesn't match any cells. + + :type flag: bool + :param flag: Does not match any cells, regardless of input. Useful for + temporarily disabling just part of a filter. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"block_all_filter": self.flag} + + +class _RegexFilter(RowFilter, ABC): + """Row filter that uses a regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + :type regex: bytes or str + :param regex: + A regular expression (RE2) for some row filter. String values + will be encoded as ASCII. + """ + + def __init__(self, regex: str | bytes): + self.regex: bytes = _to_bytes(regex) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.regex == self.regex + + def __ne__(self, other): + return not self == other + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(regex={self.regex!r})" + + +class RowKeyRegexFilter(_RegexFilter): + """Row filter for a row key regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. 
Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells from rows with row + keys that satisfy this regex. For a + ``CheckAndMutateRowRequest``, this filter is unnecessary + since the row key is already specified. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"row_key_regex_filter": self.regex} + + +class RowSampleFilter(RowFilter): + """Matches all cells from a row with probability p. + + :type sample: float + :param sample: The probability of matching a cell (must be in the + interval ``(0, 1)`` The end points are excluded). + """ + + def __init__(self, sample: float): + self.sample: float = sample + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.sample == self.sample + + def __ne__(self, other): + return not self == other + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"row_sample_filter": self.sample} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(sample={self.sample})" + + +class FamilyNameRegexFilter(_RegexFilter): + """Row filter for a family name regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + :type regex: str + :param regex: A regular expression (RE2) to match cells from columns in a + given column family. For technical reasons, the regex must + not contain the ``':'`` character, even if it is not being + used as a literal. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"family_name_regex_filter": self.regex} + + +class ColumnQualifierRegexFilter(_RegexFilter): + """Row filter for a column qualifier regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells from column that + match this regex (irrespective of column family). + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"column_qualifier_regex_filter": self.regex} + + +class TimestampRange(object): + """Range of time with inclusive lower and exclusive upper bounds. + + :type start: :class:`datetime.datetime` + :param start: (Optional) The (inclusive) lower bound of the timestamp + range. If omitted, defaults to Unix epoch. + + :type end: :class:`datetime.datetime` + :param end: (Optional) The (exclusive) upper bound of the timestamp + range. If omitted, no upper bound is used. 
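A sketch of the regex filters above; patterns are raw RE2 bytes, so `bytes` literals are the safest form (the patterns here are hypothetical):

```python
from google.cloud.bigtable.data.row_filters import (
    ColumnQualifierRegexFilter,
    RowKeyRegexFilter,
)

key_filter = RowKeyRegexFilter(rb"user-\d+")
col_filter = ColumnQualifierRegexFilter(rb"addr.*")
assert key_filter._to_dict() == {"row_key_regex_filter": rb"user-\d+"}
```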
+ """ + + def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None): + self.start: "datetime" | None = start + self.end: "datetime" | None = end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.start == self.start and other.end == self.end + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.TimestampRange: + """Converts the :class:`TimestampRange` to a protobuf. + + Returns: The converted current object. + """ + return data_v2_pb2.TimestampRange(**self._to_dict()) + + def _to_dict(self) -> dict[str, int]: + """Converts the timestamp range to a dict representation.""" + timestamp_range_kwargs = {} + if self.start is not None: + start_time = _microseconds_from_datetime(self.start) // 1000 * 1000 + timestamp_range_kwargs["start_timestamp_micros"] = start_time + if self.end is not None: + end_time = _microseconds_from_datetime(self.end) + if end_time % 1000 != 0: + # if not a whole milisecond value, round up + end_time = end_time // 1000 * 1000 + 1000 + timestamp_range_kwargs["end_timestamp_micros"] = end_time + return timestamp_range_kwargs + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start={self.start}, end={self.end})" + + +class TimestampRangeFilter(RowFilter): + """Row filter that limits cells to a range of time. + + :type range_: :class:`TimestampRange` + :param range_: Range of time that cells should match against. + """ + + def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None): + self.range_: TimestampRange = TimestampRange(start, end) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.range_ == self.range_ + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + First converts the ``range_`` on the current object to a protobuf and + then uses it in the ``timestamp_range_filter`` field. + + Returns: The converted current object. + """ + return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_._to_pb()) + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"timestamp_range_filter": self.range_._to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start={self.range_.start!r}, end={self.range_.end!r})" + + +class ColumnRangeFilter(RowFilter): + """A row filter to restrict to a range of columns. + + Both the start and end column can be included or excluded in the range. + By default, we include them both, but this can be changed with optional + flags. + + :type family_id: str + :param family_id: The column family that contains the columns. Must + be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type start_qualifier: bytes + :param start_qualifier: The start of the range of columns. If no value is + used, the backend applies no upper bound to the + values. + + :type end_qualifier: bytes + :param end_qualifier: The end of the range of columns. If no value is used, + the backend applies no upper bound to the values. + + :type inclusive_start: bool + :param inclusive_start: Boolean indicating if the start column should be + included in the range (or excluded). Defaults + to :data:`True` if ``start_qualifier`` is passed and + no ``inclusive_start`` was given. 
+ + :type inclusive_end: bool + :param inclusive_end: Boolean indicating if the end column should be + included in the range (or excluded). Defaults + to :data:`True` if ``end_qualifier`` is passed and + no ``inclusive_end`` was given. + + :raises: :class:`ValueError ` if ``inclusive_start`` + is set but no ``start_qualifier`` is given or if ``inclusive_end`` + is set but no ``end_qualifier`` is given + """ + + def __init__( + self, + family_id: str, + start_qualifier: bytes | None = None, + end_qualifier: bytes | None = None, + inclusive_start: bool | None = None, + inclusive_end: bool | None = None, + ): + if inclusive_start is None: + inclusive_start = True + elif start_qualifier is None: + raise ValueError( + "inclusive_start was specified but no start_qualifier was given." + ) + if inclusive_end is None: + inclusive_end = True + elif end_qualifier is None: + raise ValueError( + "inclusive_end was specified but no end_qualifier was given." + ) + + self.family_id = family_id + + self.start_qualifier = start_qualifier + self.inclusive_start = inclusive_start + + self.end_qualifier = end_qualifier + self.inclusive_end = inclusive_end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + other.family_id == self.family_id + and other.start_qualifier == self.start_qualifier + and other.end_qualifier == self.end_qualifier + and other.inclusive_start == self.inclusive_start + and other.inclusive_end == self.inclusive_end + ) + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it + in the ``column_range_filter`` field. + + Returns: The converted current object. + """ + column_range = data_v2_pb2.ColumnRange(**self._range_to_dict()) + return data_v2_pb2.RowFilter(column_range_filter=column_range) + + def _range_to_dict(self) -> dict[str, str | bytes]: + """Converts the column range range to a dict representation.""" + column_range_kwargs: dict[str, str | bytes] = {} + column_range_kwargs["family_name"] = self.family_id + if self.start_qualifier is not None: + if self.inclusive_start: + key = "start_qualifier_closed" + else: + key = "start_qualifier_open" + column_range_kwargs[key] = _to_bytes(self.start_qualifier) + if self.end_qualifier is not None: + if self.inclusive_end: + key = "end_qualifier_closed" + else: + key = "end_qualifier_open" + column_range_kwargs[key] = _to_bytes(self.end_qualifier) + return column_range_kwargs + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"column_range_filter": self._range_to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(family_id='{self.family_id}', start_qualifier={self.start_qualifier!r}, end_qualifier={self.end_qualifier!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})" + + +class ValueRegexFilter(_RegexFilter): + """Row filter for a value regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. 
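A sketch of the serialized form of a column range (the family and qualifiers are hypothetical):

```python
from google.cloud.bigtable.data.row_filters import ColumnRangeFilter

f = ColumnRangeFilter(
    "stats", start_qualifier=b"a", end_qualifier=b"m", inclusive_end=False
)
assert f._range_to_dict() == {
    "family_name": "stats",
    "start_qualifier_closed": b"a",  # inclusive by default
    "end_qualifier_open": b"m",      # made exclusive explicitly
}
```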
The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes or str + :param regex: A regular expression (RE2) to match cells with values that + match this regex. String values will be encoded as ASCII. + """ + + def _to_dict(self) -> dict[str, bytes]: + """Converts the row filter to a dict representation.""" + return {"value_regex_filter": self.regex} + + +class LiteralValueFilter(ValueRegexFilter): + """Row filter for an exact value. + + + :type value: bytes or str or int + :param value: + a literal string, integer, or the equivalent bytes. + Integer values will be packed into signed 8-bytes. + """ + + def __init__(self, value: bytes | str | int): + if isinstance(value, int): + value = _PACK_I64(value) + elif isinstance(value, str): + value = value.encode("utf-8") + value = self._write_literal_regex(value) + super(LiteralValueFilter, self).__init__(value) + + @staticmethod + def _write_literal_regex(input_bytes: bytes) -> bytes: + """ + Escape re2 special characters from literal bytes. + + Extracted from: re2 QuoteMeta: + https://github.com/google/re2/blob/70f66454c255080a54a8da806c52d1f618707f8a/re2/re2.cc#L456 + """ + result = bytearray() + for byte in input_bytes: + # If this is the part of a UTF8 or Latin1 character, we need \ + # to copy this byte without escaping. Experimentally this is \ + # what works correctly with the regexp library. \ + utf8_latin1_check = (byte & 128) == 0 + if ( + (byte < ord("a") or byte > ord("z")) + and (byte < ord("A") or byte > ord("Z")) + and (byte < ord("0") or byte > ord("9")) + and byte != ord("_") + and utf8_latin1_check + ): + if byte == 0: + # Special handling for null chars. + # Note that this special handling is not strictly required for RE2, + # but this quoting is required for other regexp libraries such as + # PCRE. + # Can't use "\\0" since the next character might be a digit. + result.extend([ord("\\"), ord("x"), ord("0"), ord("0")]) + continue + result.append(ord(b"\\")) + result.append(byte) + return bytes(result) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(value={self.regex!r})" + + +class ValueRangeFilter(RowFilter): + """A range of values to restrict to in a row filter. + + Will only match cells that have values in this range. + + Both the start and end value can be included or excluded in the range. + By default, we include them both, but this can be changed with optional + flags. + + :type start_value: bytes + :param start_value: The start of the range of values. If no value is used, + the backend applies no lower bound to the values. + + :type end_value: bytes + :param end_value: The end of the range of values. If no value is used, + the backend applies no upper bound to the values. + + :type inclusive_start: bool + :param inclusive_start: Boolean indicating if the start value should be + included in the range (or excluded). Defaults + to :data:`True` if ``start_value`` is passed and + no ``inclusive_start`` was given. + + :type inclusive_end: bool + :param inclusive_end: Boolean indicating if the end value should be + included in the range (or excluded). Defaults + to :data:`True` if ``end_value`` is passed and + no ``inclusive_end`` was given. 
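The escaping implemented in `LiteralValueFilter` above can be observed directly (values hypothetical):

```python
from google.cloud.bigtable.data.row_filters import LiteralValueFilter

assert LiteralValueFilter("abc").regex == b"abc"    # alphanumerics pass through
assert LiteralValueFilter("a.c").regex == b"a\\.c"  # RE2 metacharacters are escaped
```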
+ + :raises: :class:`ValueError ` if ``inclusive_start`` + is set but no ``start_value`` is given or if ``inclusive_end`` + is set but no ``end_value`` is given + """ + + def __init__( + self, + start_value: bytes | int | None = None, + end_value: bytes | int | None = None, + inclusive_start: bool | None = None, + inclusive_end: bool | None = None, + ): + if inclusive_start is None: + inclusive_start = True + elif start_value is None: + raise ValueError( + "inclusive_start was specified but no start_value was given." + ) + if inclusive_end is None: + inclusive_end = True + elif end_value is None: + raise ValueError( + "inclusive_end was specified but no end_qualifier was given." + ) + if isinstance(start_value, int): + start_value = _PACK_I64(start_value) + self.start_value = start_value + self.inclusive_start = inclusive_start + + if isinstance(end_value, int): + end_value = _PACK_I64(end_value) + self.end_value = end_value + self.inclusive_end = inclusive_end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + other.start_value == self.start_value + and other.end_value == self.end_value + and other.inclusive_start == self.inclusive_start + and other.inclusive_end == self.inclusive_end + ) + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + First converts to a :class:`.data_v2_pb2.ValueRange` and then uses + it to create a row filter protobuf. + + Returns: The converted current object. + """ + value_range = data_v2_pb2.ValueRange(**self._range_to_dict()) + return data_v2_pb2.RowFilter(value_range_filter=value_range) + + def _range_to_dict(self) -> dict[str, bytes]: + """Converts the value range range to a dict representation.""" + value_range_kwargs = {} + if self.start_value is not None: + if self.inclusive_start: + key = "start_value_closed" + else: + key = "start_value_open" + value_range_kwargs[key] = _to_bytes(self.start_value) + if self.end_value is not None: + if self.inclusive_end: + key = "end_value_closed" + else: + key = "end_value_open" + value_range_kwargs[key] = _to_bytes(self.end_value) + return value_range_kwargs + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"value_range_filter": self._range_to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start_value={self.start_value!r}, end_value={self.end_value!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})" + + +class _CellCountFilter(RowFilter, ABC): + """Row filter that uses an integer count of cells. + + The cell count is used as an offset or a limit for the number + of results returned. + + :type num_cells: int + :param num_cells: An integer count / offset / limit. + """ + + def __init__(self, num_cells: int): + self.num_cells = num_cells + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.num_cells == self.num_cells + + def __ne__(self, other): + return not self == other + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(num_cells={self.num_cells})" + + +class CellsRowOffsetFilter(_CellCountFilter): + """Row filter to skip cells in a row. + + :type num_cells: int + :param num_cells: Skips the first N cells of the row. 
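Integer bounds follow the same big-endian int64 packing as `LiteralValueFilter`; a minimal sketch:

```python
from google.cloud.bigtable.data.row_filters import ValueRangeFilter

f = ValueRangeFilter(start_value=0, end_value=100, inclusive_end=False)
d = f._range_to_dict()
assert d["start_value_closed"] == (0).to_bytes(8, "big")
assert d["end_value_open"] == (100).to_bytes(8, "big")
```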
+ """ + + def _to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_row_offset_filter": self.num_cells} + + +class CellsRowLimitFilter(_CellCountFilter): + """Row filter to limit cells in a row. + + :type num_cells: int + :param num_cells: Matches only the first N cells of the row. + """ + + def _to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_row_limit_filter": self.num_cells} + + +class CellsColumnLimitFilter(_CellCountFilter): + """Row filter to limit cells in a column. + + :type num_cells: int + :param num_cells: Matches only the most recent N cells within each column. + This filters a (family name, column) pair, based on + timestamps of each cell. + """ + + def _to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_column_limit_filter": self.num_cells} + + +class StripValueTransformerFilter(_BoolFilter): + """Row filter that transforms cells into empty string (0 bytes). + + :type flag: bool + :param flag: If :data:`True`, replaces each cell's value with the empty + string. As the name indicates, this is more useful as a + transformer than a generic query / filter. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"strip_value_transformer": self.flag} + + +class ApplyLabelFilter(RowFilter): + """Filter to apply labels to cells. + + Intended to be used as an intermediate filter on a pre-existing filtered + result set. This way if two sets are combined, the label can tell where + the cell(s) originated.This allows the client to determine which results + were produced from which part of the filter. + + .. note:: + + Due to a technical limitation of the backend, it is not currently + possible to apply multiple labels to a cell. + + :type label: str + :param label: Label to apply to cells in the output row. Values must be + at most 15 characters long, and match the pattern + ``[a-z0-9\\-]+``. + """ + + def __init__(self, label: str): + self.label = label + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.label == self.label + + def __ne__(self, other): + return not self == other + + def _to_dict(self) -> dict[str, str]: + """Converts the row filter to a dict representation.""" + return {"apply_label_transformer": self.label} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(label={self.label})" + + +class _FilterCombination(RowFilter, Sequence[RowFilter], ABC): + """Chain of row filters. + + Sends rows through several filters in sequence. The filters are "chained" + together to process a row. After the first filter is applied, the second + is applied to the filtered output and so on for subsequent filters. 
+ + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def __init__(self, filters: list[RowFilter] | None = None): + if filters is None: + filters = [] + self.filters: list[RowFilter] = filters + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.filters == self.filters + + def __ne__(self, other): + return not self == other + + def __len__(self) -> int: + return len(self.filters) + + @overload + def __getitem__(self, index: int) -> RowFilter: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: slice) -> list[RowFilter]: + # overload signature for type checking + pass + + def __getitem__(self, index): + return self.filters[index] + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(filters={self.filters})" + + def __str__(self) -> str: + """ + Returns a string representation of the filter chain. + + Adds line breaks between each sub-filter for readability. + """ + output = [f"{self.__class__.__name__}(["] + for filter_ in self.filters: + filter_lines = f"{filter_},".splitlines() + output.extend([f" {line}" for line in filter_lines]) + output.append("])") + return "\n".join(output) + + +class RowFilterChain(_FilterCombination): + """Chain of row filters. + + Sends rows through several filters in sequence. The filters are "chained" + together to process a row. After the first filter is applied, the second + is applied to the filtered output and so on for subsequent filters. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. + """ + chain = data_v2_pb2.RowFilter.Chain( + filters=[row_filter._to_pb() for row_filter in self.filters] + ) + return data_v2_pb2.RowFilter(chain=chain) + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"chain": {"filters": [f._to_dict() for f in self.filters]}} + + +class RowFilterUnion(_FilterCombination): + """Union of row filters. + + Sends rows through several filters simultaneously, then + merges / interleaves all the filtered results together. + + If multiple cells are produced with the same column and timestamp, + they will all appear in the output row in an unspecified mutual order. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. + """ + interleave = data_v2_pb2.RowFilter.Interleave( + filters=[row_filter._to_pb() for row_filter in self.filters] + ) + return data_v2_pb2.RowFilter(interleave=interleave) + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"interleave": {"filters": [f._to_dict() for f in self.filters]}} + + +class ConditionalRowFilter(RowFilter): + """Conditional row filter which exhibits ternary behavior. + + Executes one of two filters based on another filter. If the ``predicate_filter`` + returns any cells in the row, then ``true_filter`` is executed. If not, + then ``false_filter`` is executed. + + .. note:: + + The ``predicate_filter`` does not execute atomically with the true and false + filters, which may lead to inconsistent or unexpected results. 
+ + Additionally, executing a :class:`ConditionalRowFilter` has poor + performance on the server, especially when ``false_filter`` is set. + + :type predicate_filter: :class:`RowFilter` + :param predicate_filter: The filter to condition on before executing the + true/false filters. + + :type true_filter: :class:`RowFilter` + :param true_filter: (Optional) The filter to execute if there are any cells + matching ``predicate_filter``. If not provided, no results + will be returned in the true case. + + :type false_filter: :class:`RowFilter` + :param false_filter: (Optional) The filter to execute if there are no cells + matching ``predicate_filter``. If not provided, no results + will be returned in the false case. + """ + + def __init__( + self, + predicate_filter: RowFilter, + true_filter: RowFilter | None = None, + false_filter: RowFilter | None = None, + ): + self.predicate_filter = predicate_filter + self.true_filter = true_filter + self.false_filter = false_filter + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + other.predicate_filter == self.predicate_filter + and other.true_filter == self.true_filter + and other.false_filter == self.false_filter + ) + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. + """ + condition_kwargs = {"predicate_filter": self.predicate_filter._to_pb()} + if self.true_filter is not None: + condition_kwargs["true_filter"] = self.true_filter._to_pb() + if self.false_filter is not None: + condition_kwargs["false_filter"] = self.false_filter._to_pb() + condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) + return data_v2_pb2.RowFilter(condition=condition) + + def _condition_to_dict(self) -> dict[str, Any]: + """Converts the condition to a dict representation.""" + condition_kwargs = {"predicate_filter": self.predicate_filter._to_dict()} + if self.true_filter is not None: + condition_kwargs["true_filter"] = self.true_filter._to_dict() + if self.false_filter is not None: + condition_kwargs["false_filter"] = self.false_filter._to_dict() + return condition_kwargs + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"condition": self._condition_to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(predicate_filter={self.predicate_filter!r}, true_filter={self.true_filter!r}, false_filter={self.false_filter!r})" + + def __str__(self) -> str: + output = [f"{self.__class__.__name__}("] + for filter_type in ("predicate_filter", "true_filter", "false_filter"): + filter_ = getattr(self, filter_type) + if filter_ is None: + continue + # add the new filter set, adding indentations for readability + filter_lines = f"{filter_type}={filter_},".splitlines() + output.extend(f" {line}" for line in filter_lines) + output.append(")") + return "\n".join(output) diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py index e546bae05..a105a8349 100644 --- a/google/cloud/bigtable/gapic_version.py +++ b/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
#
-__version__ = "2.21.0"  # {x-release-please-version}
+__version__ = "2.35.0"  # {x-release-please-version}
diff --git a/google/cloud/bigtable/helpers.py b/google/cloud/bigtable/helpers.py
new file mode 100644
index 000000000..78af43089
--- /dev/null
+++ b/google/cloud/bigtable/helpers.py
@@ -0,0 +1,31 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TypeVar, Iterable, Generator, Tuple
+
+from itertools import islice
+
+T = TypeVar("T")
+
+
+# batched landed in the standard library (itertools.batched) in Python 3.12.
+def batched(iterable: Iterable[T], n: int) -> Generator[Tuple[T, ...], None, None]:
+    # batched('ABCDEFG', 3) → ABC DEF G
+    if n < 1:
+        raise ValueError("n must be at least one")
+    it = iter(iterable)
+    batch = tuple(islice(it, n))
+    while batch:
+        yield batch
+        batch = tuple(islice(it, n))
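For reference, the new helper mirrors ``itertools.batched``: it yields full tuples of ``n`` items plus a shorter final tuple, and rejects ``n < 1`` once iterated (it is a generator function, so the ``ValueError`` is raised lazily, on the first ``next()``):

.. code-block:: python

    from google.cloud.bigtable.helpers import batched

    list(batched("ABCDEFG", 3))
    # [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)]

    list(batched([], 3))
    # [] -- an empty iterable yields no batches

    next(batched("ABC", 0))
    # ValueError: n must be at least one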
diff --git a/google/cloud/bigtable/instance.py b/google/cloud/bigtable/instance.py
index 6d092cefd..23fb1c95d 100644
--- a/google/cloud/bigtable/instance.py
+++ b/google/cloud/bigtable/instance.py
@@ -32,6 +32,7 @@
 
 import warnings
 
+
 _INSTANCE_NAME_RE = re.compile(
     r"^projects/(?P<project>[^/]+)/" r"instances/(?P<instance_id>[a-z][-a-z0-9]*)$"
 )
diff --git a/google/cloud/bigtable/py.typed b/google/cloud/bigtable/py.typed
index 7bd4705d4..889d34043 100644
--- a/google/cloud/bigtable/py.typed
+++ b/google/cloud/bigtable/py.typed
@@ -1,2 +1,2 @@
-# Marker file for PEP 561.
-# The google-cloud-bigtable package uses inline types.
+# Marker file for PEP 561.
+# The google-cloud-bigtable package uses inline types.
diff --git a/google/cloud/bigtable/row_set.py b/google/cloud/bigtable/row_set.py
index 82a540b5a..2bc436d54 100644
--- a/google/cloud/bigtable/row_set.py
+++ b/google/cloud/bigtable/row_set.py
@@ -22,7 +22,7 @@ class RowSet(object):
     """Convenience wrapper of google.bigtable.v2.RowSet
 
     Useful for creating a set of row keys and row ranges, which can
-    be passed to yield_rows method of class:`.Table.yield_rows`.
+    be passed to the :meth:`.Table.read_rows` method.
     """
 
     def __init__(self):
diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py
index e3191a729..0009f287e 100644
--- a/google/cloud/bigtable/table.py
+++ b/google/cloud/bigtable/table.py
@@ -47,7 +47,7 @@
 from google.cloud.bigtable.row_set import RowRange
 from google.cloud.bigtable import enums
 from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2
-from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
+from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient
 from google.cloud.bigtable_admin_v2.types import table as admin_messages_v2_pb2
 from google.cloud.bigtable_admin_v2.types import (
     bigtable_table_admin as table_admin_messages_v2_pb2,
@@ -533,7 +533,7 @@ def get_encryption_info(self):
             for cluster_id, value_pb in table_pb.cluster_states.items()
         }
 
-    def read_row(self, row_key, filter_=None):
+    def read_row(self, row_key, filter_=None, retry=DEFAULT_RETRY_READ_ROWS):
         """Read a single row from this table.
 
         For example:
@@ -550,6 +550,14 @@ def read_row(self, row_key, filter_=None):
         :param filter_: (Optional) The filter to apply to the contents of the
                         row. If unset, returns the entire row.
 
+        :type retry: :class:`~google.api_core.retry.Retry`
+        :param retry:
+            (Optional) Retry delay and deadline arguments. To override, the
+            default value :attr:`DEFAULT_RETRY_READ_ROWS` can be used and
+            modified with the :meth:`~google.api_core.retry.Retry.with_delay`
+            method or the :meth:`~google.api_core.retry.Retry.with_deadline`
+            method.
+
         :rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
         :returns: The contents of the row if any chunks were returned in
                   the response, otherwise :data:`None`.
@@ -558,7 +566,9 @@
         """
         row_set = RowSet()
         row_set.add_row_key(row_key)
-        result_iter = iter(self.read_rows(filter_=filter_, row_set=row_set))
+        result_iter = iter(
+            self.read_rows(filter_=filter_, row_set=row_set, retry=retry)
+        )
         row = next(result_iter, None)
         if next(result_iter, None) is not None:
             raise ValueError("More than one row was returned.")
@@ -980,7 +990,7 @@ def list_backups(self, cluster_id=None, filter_=None, order_by=None, page_size=0
         if filter_:
             backups_filter = "({}) AND ({})".format(backups_filter, filter_)
 
-        parent = BigtableTableAdminClient.cluster_path(
+        parent = BaseBigtableTableAdminClient.cluster_path(
             project=self._instance._client.project,
             instance=self._instance.instance_id,
             cluster=cluster_id,
@@ -1027,7 +1037,7 @@ def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=None):
             and `backup_id` parameters even if specified.
 
         :return: An instance of
-             :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`.
+             :class:`~google.api_core.operation.Operation`.
 
         :raises: google.api_core.exceptions.AlreadyExists: If the table
                  already exists.
@@ -1039,13 +1049,13 @@
         """
         api = self._instance._client.table_admin_client
         if not backup_name:
-            backup_name = BigtableTableAdminClient.backup_path(
+            backup_name = BaseBigtableTableAdminClient.backup_path(
                 project=self._instance._client.project,
                 instance=self._instance.instance_id,
                 cluster=cluster_id,
                 backup=backup_id,
             )
-        return api.restore_table(
+        return api._restore_table(
             request={
                 "parent": self._instance.name,
                 "table_id": new_table_id,
diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py
index d26d79b3c..2d95b06c8 100644
--- a/google/cloud/bigtable_admin/__init__.py
+++ b/google/cloud/bigtable_admin/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
+# Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,10 +25,10 @@ BigtableInstanceAdminAsyncClient, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.async_client import ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, ) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( @@ -46,6 +46,18 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( CreateInstanceRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateLogicalViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateLogicalViewRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateMaterializedViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateMaterializedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( DeleteAppProfileRequest, ) @@ -55,6 +67,12 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( DeleteInstanceRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + DeleteLogicalViewRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + DeleteMaterializedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( GetAppProfileRequest, ) @@ -64,6 +82,12 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( GetInstanceRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + GetLogicalViewRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + GetMaterializedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( ListAppProfilesRequest, ) @@ -88,6 +112,18 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( ListInstancesResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListLogicalViewsRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListLogicalViewsResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListMaterializedViewsRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListMaterializedViewsResponse, +) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( PartialUpdateClusterMetadata, ) @@ -109,6 +145,18 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( UpdateInstanceMetadata, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateLogicalViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateLogicalViewRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateMaterializedViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateMaterializedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CheckConsistencyRequest, ) @@ -117,12 +165,24 @@ ) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin 
import ( + CreateAuthorizedViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateAuthorizedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateBackupMetadata, ) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateBackupRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateSchemaBundleMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateSchemaBundleRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateTableFromSnapshotMetadata, ) @@ -130,9 +190,18 @@ CreateTableFromSnapshotRequest, ) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DataBoostReadLocalWrites, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DeleteAuthorizedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( DeleteBackupRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DeleteSchemaBundleRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( DeleteSnapshotRequest, ) @@ -146,13 +215,31 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( GenerateConsistencyTokenResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + GetAuthorizedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + GetSchemaBundleRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListAuthorizedViewsRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListAuthorizedViewsResponse, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( ListBackupsResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListSchemaBundlesRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListSchemaBundlesResponse, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( ListSnapshotsRequest, ) @@ -179,15 +266,30 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( SnapshotTableRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + StandardReadRemoteWrites, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UndeleteTableMetadata, ) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UndeleteTableRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateAuthorizedViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateAuthorizedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UpdateBackupRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateSchemaBundleMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateSchemaBundleRequest, +) from 
google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UpdateTableMetadata, ) @@ -200,33 +302,49 @@ from google.cloud.bigtable_admin_v2.types.instance import Cluster from google.cloud.bigtable_admin_v2.types.instance import HotTablet from google.cloud.bigtable_admin_v2.types.instance import Instance +from google.cloud.bigtable_admin_v2.types.instance import LogicalView +from google.cloud.bigtable_admin_v2.types.instance import MaterializedView +from google.cloud.bigtable_admin_v2.types.table import AuthorizedView from google.cloud.bigtable_admin_v2.types.table import Backup from google.cloud.bigtable_admin_v2.types.table import BackupInfo from google.cloud.bigtable_admin_v2.types.table import ChangeStreamConfig from google.cloud.bigtable_admin_v2.types.table import ColumnFamily from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo from google.cloud.bigtable_admin_v2.types.table import GcRule +from google.cloud.bigtable_admin_v2.types.table import ProtoSchema from google.cloud.bigtable_admin_v2.types.table import RestoreInfo +from google.cloud.bigtable_admin_v2.types.table import SchemaBundle from google.cloud.bigtable_admin_v2.types.table import Snapshot from google.cloud.bigtable_admin_v2.types.table import Table +from google.cloud.bigtable_admin_v2.types.table import TieredStorageConfig +from google.cloud.bigtable_admin_v2.types.table import TieredStorageRule from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType +from google.cloud.bigtable_admin_v2.types.types import Type __all__ = ( "BigtableInstanceAdminClient", "BigtableInstanceAdminAsyncClient", - "BigtableTableAdminClient", - "BigtableTableAdminAsyncClient", + "BaseBigtableTableAdminClient", + "BaseBigtableTableAdminAsyncClient", "CreateAppProfileRequest", "CreateClusterMetadata", "CreateClusterRequest", "CreateInstanceMetadata", "CreateInstanceRequest", + "CreateLogicalViewMetadata", + "CreateLogicalViewRequest", + "CreateMaterializedViewMetadata", + "CreateMaterializedViewRequest", "DeleteAppProfileRequest", "DeleteClusterRequest", "DeleteInstanceRequest", + "DeleteLogicalViewRequest", + "DeleteMaterializedViewRequest", "GetAppProfileRequest", "GetClusterRequest", "GetInstanceRequest", + "GetLogicalViewRequest", + "GetMaterializedViewRequest", "ListAppProfilesRequest", "ListAppProfilesResponse", "ListClustersRequest", @@ -235,6 +353,10 @@ "ListHotTabletsResponse", "ListInstancesRequest", "ListInstancesResponse", + "ListLogicalViewsRequest", + "ListLogicalViewsResponse", + "ListMaterializedViewsRequest", + "ListMaterializedViewsResponse", "PartialUpdateClusterMetadata", "PartialUpdateClusterRequest", "PartialUpdateInstanceRequest", @@ -242,26 +364,43 @@ "UpdateAppProfileRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", + "UpdateLogicalViewMetadata", + "UpdateLogicalViewRequest", + "UpdateMaterializedViewMetadata", + "UpdateMaterializedViewRequest", "CheckConsistencyRequest", "CheckConsistencyResponse", "CopyBackupMetadata", "CopyBackupRequest", + "CreateAuthorizedViewMetadata", + "CreateAuthorizedViewRequest", "CreateBackupMetadata", "CreateBackupRequest", + "CreateSchemaBundleMetadata", + "CreateSchemaBundleRequest", "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", + "DataBoostReadLocalWrites", + "DeleteAuthorizedViewRequest", "DeleteBackupRequest", + "DeleteSchemaBundleRequest", "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", + 
"GetAuthorizedViewRequest", "GetBackupRequest", + "GetSchemaBundleRequest", "GetSnapshotRequest", "GetTableRequest", + "ListAuthorizedViewsRequest", + "ListAuthorizedViewsResponse", "ListBackupsRequest", "ListBackupsResponse", + "ListSchemaBundlesRequest", + "ListSchemaBundlesResponse", "ListSnapshotsRequest", "ListSnapshotsResponse", "ListTablesRequest", @@ -272,9 +411,14 @@ "RestoreTableRequest", "SnapshotTableMetadata", "SnapshotTableRequest", + "StandardReadRemoteWrites", "UndeleteTableMetadata", "UndeleteTableRequest", + "UpdateAuthorizedViewMetadata", + "UpdateAuthorizedViewRequest", "UpdateBackupRequest", + "UpdateSchemaBundleMetadata", + "UpdateSchemaBundleRequest", "UpdateTableMetadata", "UpdateTableRequest", "OperationProgress", @@ -285,14 +429,27 @@ "Cluster", "HotTablet", "Instance", + "LogicalView", + "MaterializedView", + "AuthorizedView", "Backup", "BackupInfo", "ChangeStreamConfig", "ColumnFamily", "EncryptionInfo", "GcRule", + "ProtoSchema", "RestoreInfo", + "SchemaBundle", "Snapshot", "Table", + "TieredStorageConfig", + "TieredStorageRule", "RestoreSourceType", + "Type", ) + +import google.cloud.bigtable_admin_v2.overlay # noqa: F401 +from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403 + +__all__ += google.cloud.bigtable_admin_v2.overlay.__all__ diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py index e546bae05..6d72a226d 100644 --- a/google/cloud/bigtable_admin/gapic_version.py +++ b/google/cloud/bigtable_admin/gapic_version.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.21.0" # {x-release-please-version} +__version__ = "2.35.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index 811b956e0..6a47979fd 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,25 +15,43 @@ # from google.cloud.bigtable_admin_v2 import gapic_version as package_version +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .services.bigtable_instance_admin import BigtableInstanceAdminClient from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient -from .services.bigtable_table_admin import BigtableTableAdminClient -from .services.bigtable_table_admin import BigtableTableAdminAsyncClient +from .services.bigtable_table_admin import BaseBigtableTableAdminClient +from .services.bigtable_table_admin import BaseBigtableTableAdminAsyncClient from .types.bigtable_instance_admin import CreateAppProfileRequest from .types.bigtable_instance_admin import CreateClusterMetadata from .types.bigtable_instance_admin import CreateClusterRequest from .types.bigtable_instance_admin import CreateInstanceMetadata from .types.bigtable_instance_admin import CreateInstanceRequest +from .types.bigtable_instance_admin import CreateLogicalViewMetadata +from .types.bigtable_instance_admin import CreateLogicalViewRequest +from .types.bigtable_instance_admin import CreateMaterializedViewMetadata +from .types.bigtable_instance_admin import CreateMaterializedViewRequest from .types.bigtable_instance_admin import DeleteAppProfileRequest from .types.bigtable_instance_admin import DeleteClusterRequest from .types.bigtable_instance_admin import DeleteInstanceRequest +from .types.bigtable_instance_admin import DeleteLogicalViewRequest +from .types.bigtable_instance_admin import DeleteMaterializedViewRequest from .types.bigtable_instance_admin import GetAppProfileRequest from .types.bigtable_instance_admin import GetClusterRequest from .types.bigtable_instance_admin import GetInstanceRequest +from .types.bigtable_instance_admin import GetLogicalViewRequest +from .types.bigtable_instance_admin import GetMaterializedViewRequest from .types.bigtable_instance_admin import ListAppProfilesRequest from .types.bigtable_instance_admin import ListAppProfilesResponse from .types.bigtable_instance_admin import ListClustersRequest @@ -42,6 +60,10 @@ from .types.bigtable_instance_admin import ListHotTabletsResponse from .types.bigtable_instance_admin import ListInstancesRequest from .types.bigtable_instance_admin import ListInstancesResponse +from .types.bigtable_instance_admin import ListLogicalViewsRequest +from .types.bigtable_instance_admin import ListLogicalViewsResponse +from .types.bigtable_instance_admin import ListMaterializedViewsRequest +from .types.bigtable_instance_admin import ListMaterializedViewsResponse from .types.bigtable_instance_admin import PartialUpdateClusterMetadata from .types.bigtable_instance_admin import PartialUpdateClusterRequest from .types.bigtable_instance_admin import PartialUpdateInstanceRequest @@ -49,26 +71,43 @@ from .types.bigtable_instance_admin import UpdateAppProfileRequest from .types.bigtable_instance_admin import UpdateClusterMetadata from .types.bigtable_instance_admin import UpdateInstanceMetadata +from .types.bigtable_instance_admin import UpdateLogicalViewMetadata +from .types.bigtable_instance_admin import UpdateLogicalViewRequest +from .types.bigtable_instance_admin import UpdateMaterializedViewMetadata +from 
.types.bigtable_instance_admin import UpdateMaterializedViewRequest from .types.bigtable_table_admin import CheckConsistencyRequest from .types.bigtable_table_admin import CheckConsistencyResponse from .types.bigtable_table_admin import CopyBackupMetadata from .types.bigtable_table_admin import CopyBackupRequest +from .types.bigtable_table_admin import CreateAuthorizedViewMetadata +from .types.bigtable_table_admin import CreateAuthorizedViewRequest from .types.bigtable_table_admin import CreateBackupMetadata from .types.bigtable_table_admin import CreateBackupRequest +from .types.bigtable_table_admin import CreateSchemaBundleMetadata +from .types.bigtable_table_admin import CreateSchemaBundleRequest from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata from .types.bigtable_table_admin import CreateTableFromSnapshotRequest from .types.bigtable_table_admin import CreateTableRequest +from .types.bigtable_table_admin import DataBoostReadLocalWrites +from .types.bigtable_table_admin import DeleteAuthorizedViewRequest from .types.bigtable_table_admin import DeleteBackupRequest +from .types.bigtable_table_admin import DeleteSchemaBundleRequest from .types.bigtable_table_admin import DeleteSnapshotRequest from .types.bigtable_table_admin import DeleteTableRequest from .types.bigtable_table_admin import DropRowRangeRequest from .types.bigtable_table_admin import GenerateConsistencyTokenRequest from .types.bigtable_table_admin import GenerateConsistencyTokenResponse +from .types.bigtable_table_admin import GetAuthorizedViewRequest from .types.bigtable_table_admin import GetBackupRequest +from .types.bigtable_table_admin import GetSchemaBundleRequest from .types.bigtable_table_admin import GetSnapshotRequest from .types.bigtable_table_admin import GetTableRequest +from .types.bigtable_table_admin import ListAuthorizedViewsRequest +from .types.bigtable_table_admin import ListAuthorizedViewsResponse from .types.bigtable_table_admin import ListBackupsRequest from .types.bigtable_table_admin import ListBackupsResponse +from .types.bigtable_table_admin import ListSchemaBundlesRequest +from .types.bigtable_table_admin import ListSchemaBundlesResponse from .types.bigtable_table_admin import ListSnapshotsRequest from .types.bigtable_table_admin import ListSnapshotsResponse from .types.bigtable_table_admin import ListTablesRequest @@ -79,9 +118,14 @@ from .types.bigtable_table_admin import RestoreTableRequest from .types.bigtable_table_admin import SnapshotTableMetadata from .types.bigtable_table_admin import SnapshotTableRequest +from .types.bigtable_table_admin import StandardReadRemoteWrites from .types.bigtable_table_admin import UndeleteTableMetadata from .types.bigtable_table_admin import UndeleteTableRequest +from .types.bigtable_table_admin import UpdateAuthorizedViewMetadata +from .types.bigtable_table_admin import UpdateAuthorizedViewRequest from .types.bigtable_table_admin import UpdateBackupRequest +from .types.bigtable_table_admin import UpdateSchemaBundleMetadata +from .types.bigtable_table_admin import UpdateSchemaBundleRequest from .types.bigtable_table_admin import UpdateTableMetadata from .types.bigtable_table_admin import UpdateTableRequest from .types.common import OperationProgress @@ -92,27 +136,130 @@ from .types.instance import Cluster from .types.instance import HotTablet from .types.instance import Instance +from .types.instance import LogicalView +from .types.instance import MaterializedView +from .types.table import AuthorizedView from .types.table import Backup 
 from .types.table import BackupInfo
 from .types.table import ChangeStreamConfig
 from .types.table import ColumnFamily
 from .types.table import EncryptionInfo
 from .types.table import GcRule
+from .types.table import ProtoSchema
 from .types.table import RestoreInfo
+from .types.table import SchemaBundle
 from .types.table import Snapshot
 from .types.table import Table
+from .types.table import TieredStorageConfig
+from .types.table import TieredStorageRule
 from .types.table import RestoreSourceType
+from .types.types import Type
+
+if hasattr(api_core, "check_python_version") and hasattr(
+    api_core, "check_dependency_versions"
+):  # pragma: NO COVER
+    api_core.check_python_version("google.cloud.bigtable_admin_v2")  # type: ignore
+    api_core.check_dependency_versions("google.cloud.bigtable_admin_v2")  # type: ignore
+else:  # pragma: NO COVER
+    # An older version of api_core is installed which does not define the
+    # functions above. We do equivalent checks manually.
+    try:
+        import warnings
+        import sys
+
+        _py_version_str = sys.version.split()[0]
+        _package_label = "google.cloud.bigtable_admin_v2"
+        if sys.version_info < (3, 9):
+            warnings.warn(
+                "You are using a non-supported Python version "
+                + f"({_py_version_str}). Google will not post any further "
+                + f"updates to {_package_label} supporting this Python version. "
+                + "Please upgrade to the latest Python version, or at "
+                + f"least to Python 3.9, and then update {_package_label}.",
+                FutureWarning,
+            )
+        if sys.version_info[:2] == (3, 9):
+            warnings.warn(
+                f"You are using a Python version ({_py_version_str}) "
+                + f"which Google will stop supporting in {_package_label} in "
+                + "January 2026. Please "
+                + "upgrade to the latest Python version, or at "
+                + "least to Python 3.10, before then, and "
+                + f"then update {_package_label}.",
+                FutureWarning,
+            )
+
+        def parse_version_to_tuple(version_string: str):
+            """Safely converts a semantic version string to a comparable tuple of integers.
+            Example: "4.25.8" -> (4, 25, 8)
+            Ignores non-numeric parts and handles common version formats.
+            Args:
+                version_string: Version string in the format "x.y.z"
+            Returns:
+                Tuple of integers for the parsed version string.
+            """
+            parts = []
+            for part in version_string.split("."):
+                try:
+                    parts.append(int(part))
+                except ValueError:
+                    # If it's a non-numeric part (e.g., the '0b1' in '1.0.0b1'), stop here.
+                    # This is a simplification compared to 'packaging.parse_version', but sufficient
+                    # for comparing strictly numeric semantic versions.
+                    break
+            return tuple(parts)
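For a concrete sense of the fallback comparison logic (illustrative calls, not part of the diff):

.. code-block:: python

    parse_version_to_tuple("4.25.8")    # (4, 25, 8)
    parse_version_to_tuple("1.0.0b1")   # (1, 0): parsing stops at the non-numeric "0b1"

    # Tuples compare element-wise, which is what the protobuf version check
    # below relies on:
    parse_version_to_tuple("4.25.7") < (4, 25, 8)   # True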
+
+        def _get_version(dependency_name):
+            try:
+                version_string: str = metadata.version(dependency_name)
+                parsed_version = parse_version_to_tuple(version_string)
+                return (parsed_version, version_string)
+            except Exception:
+                # Catch exceptions from metadata.version() (e.g., PackageNotFoundError)
+                # or errors during parse_version_to_tuple
+                return (None, "--")
+
+        _dependency_package = "google.protobuf"
+        _next_supported_version = "4.25.8"
+        _next_supported_version_tuple = (4, 25, 8)
+        _recommendation = " (we recommend 6.x)"
+        (_version_used, _version_used_string) = _get_version(_dependency_package)
+        if _version_used and _version_used < _next_supported_version_tuple:
+            warnings.warn(
+                f"Package {_package_label} depends on "
+                + f"{_dependency_package}, currently installed at version "
+                + f"{_version_used_string}. Future updates to "
+                + f"{_package_label} will require {_dependency_package} at "
+                + f"version {_next_supported_version} or higher{_recommendation}."
+                + " Please ensure "
+                + "that either (a) your Python environment doesn't pin the "
+                + f"version of {_dependency_package}, so that updates to "
+                + f"{_package_label} can require the higher version, or "
+                + "(b) you manually update your Python environment to use at "
+                + f"least version {_next_supported_version} of "
+                + f"{_dependency_package}.",
+                FutureWarning,
+            )
+    except Exception:
+        warnings.warn(
+            "Could not determine the version of Python "
+            + "currently being used. To continue receiving "
+            + f"updates for {_package_label}, ensure you are "
+            + "using a supported version of Python; see "
+            + "https://devguide.python.org/versions/"
+        )
 
 __all__ = (
+    "BaseBigtableTableAdminAsyncClient",
     "BigtableInstanceAdminAsyncClient",
-    "BigtableTableAdminAsyncClient",
     "AppProfile",
+    "AuthorizedView",
     "AutoscalingLimits",
     "AutoscalingTargets",
     "Backup",
     "BackupInfo",
+    "BaseBigtableTableAdminClient",
     "BigtableInstanceAdminClient",
-    "BigtableTableAdminClient",
     "ChangeStreamConfig",
     "CheckConsistencyRequest",
     "CheckConsistencyResponse",
@@ -121,19 +268,32 @@
     "CopyBackupMetadata",
     "CopyBackupRequest",
     "CreateAppProfileRequest",
+    "CreateAuthorizedViewMetadata",
+    "CreateAuthorizedViewRequest",
     "CreateBackupMetadata",
     "CreateBackupRequest",
     "CreateClusterMetadata",
     "CreateClusterRequest",
     "CreateInstanceMetadata",
     "CreateInstanceRequest",
+    "CreateLogicalViewMetadata",
+    "CreateLogicalViewRequest",
+    "CreateMaterializedViewMetadata",
+    "CreateMaterializedViewRequest",
+    "CreateSchemaBundleMetadata",
+    "CreateSchemaBundleRequest",
     "CreateTableFromSnapshotMetadata",
     "CreateTableFromSnapshotRequest",
     "CreateTableRequest",
+    "DataBoostReadLocalWrites",
     "DeleteAppProfileRequest",
+    "DeleteAuthorizedViewRequest",
     "DeleteBackupRequest",
     "DeleteClusterRequest",
     "DeleteInstanceRequest",
+    "DeleteLogicalViewRequest",
+    "DeleteMaterializedViewRequest",
+    "DeleteSchemaBundleRequest",
     "DeleteSnapshotRequest",
     "DeleteTableRequest",
     "DropRowRangeRequest",
@@ -142,15 +302,21 @@
     "GenerateConsistencyTokenRequest",
     "GenerateConsistencyTokenResponse",
     "GetAppProfileRequest",
+    "GetAuthorizedViewRequest",
     "GetBackupRequest",
     "GetClusterRequest",
     "GetInstanceRequest",
+    "GetLogicalViewRequest",
+    "GetMaterializedViewRequest",
+    "GetSchemaBundleRequest",
     "GetSnapshotRequest",
     "GetTableRequest",
     "HotTablet",
     "Instance",
     "ListAppProfilesRequest",
     "ListAppProfilesResponse",
+    "ListAuthorizedViewsRequest",
+    "ListAuthorizedViewsResponse",
     "ListBackupsRequest",
     "ListBackupsResponse",
     "ListClustersRequest",
@@ -159,32 +325,58 @@
     "ListHotTabletsResponse",
     "ListInstancesRequest",
     "ListInstancesResponse",
+    "ListLogicalViewsRequest",
+    "ListLogicalViewsResponse",
+    "ListMaterializedViewsRequest",
+    "ListMaterializedViewsResponse",
+    "ListSchemaBundlesRequest",
+    "ListSchemaBundlesResponse",
     "ListSnapshotsRequest",
     "ListSnapshotsResponse",
     "ListTablesRequest",
     "ListTablesResponse",
+    "LogicalView",
+    "MaterializedView",
     "ModifyColumnFamiliesRequest",
     "OperationProgress",
     "OptimizeRestoredTableMetadata",
     "PartialUpdateClusterMetadata",
     "PartialUpdateClusterRequest",
     "PartialUpdateInstanceRequest",
+    "ProtoSchema",
     "RestoreInfo",
     "RestoreSourceType",
     "RestoreTableMetadata",
     "RestoreTableRequest",
+    "SchemaBundle",
     "Snapshot",
     "SnapshotTableMetadata",
     "SnapshotTableRequest",
+    "StandardReadRemoteWrites",
     "StorageType",
     "Table",
+    "TieredStorageConfig",
+    "TieredStorageRule",
+    "Type",
     "UndeleteTableMetadata",
     "UndeleteTableRequest",
     "UpdateAppProfileMetadata",
     "UpdateAppProfileRequest",
+    "UpdateAuthorizedViewMetadata",
+
"UpdateAuthorizedViewRequest", "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", + "UpdateLogicalViewMetadata", + "UpdateLogicalViewRequest", + "UpdateMaterializedViewMetadata", + "UpdateMaterializedViewRequest", + "UpdateSchemaBundleMetadata", + "UpdateSchemaBundleRequest", "UpdateTableMetadata", "UpdateTableRequest", ) + +from .overlay import * # noqa: F403 + +__all__ += overlay.__all__ # noqa: F405 diff --git a/google/cloud/bigtable_admin_v2/gapic_metadata.json b/google/cloud/bigtable_admin_v2/gapic_metadata.json index 9b3426470..9725d3384 100644 --- a/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -25,6 +25,16 @@ "create_instance" ] }, + "CreateLogicalView": { + "methods": [ + "create_logical_view" + ] + }, + "CreateMaterializedView": { + "methods": [ + "create_materialized_view" + ] + }, "DeleteAppProfile": { "methods": [ "delete_app_profile" @@ -40,6 +50,16 @@ "delete_instance" ] }, + "DeleteLogicalView": { + "methods": [ + "delete_logical_view" + ] + }, + "DeleteMaterializedView": { + "methods": [ + "delete_materialized_view" + ] + }, "GetAppProfile": { "methods": [ "get_app_profile" @@ -60,6 +80,16 @@ "get_instance" ] }, + "GetLogicalView": { + "methods": [ + "get_logical_view" + ] + }, + "GetMaterializedView": { + "methods": [ + "get_materialized_view" + ] + }, "ListAppProfiles": { "methods": [ "list_app_profiles" @@ -80,6 +110,16 @@ "list_instances" ] }, + "ListLogicalViews": { + "methods": [ + "list_logical_views" + ] + }, + "ListMaterializedViews": { + "methods": [ + "list_materialized_views" + ] + }, "PartialUpdateCluster": { "methods": [ "partial_update_cluster" @@ -114,6 +154,16 @@ "methods": [ "update_instance" ] + }, + "UpdateLogicalView": { + "methods": [ + "update_logical_view" + ] + }, + "UpdateMaterializedView": { + "methods": [ + "update_materialized_view" + ] } } }, @@ -135,6 +185,16 @@ "create_instance" ] }, + "CreateLogicalView": { + "methods": [ + "create_logical_view" + ] + }, + "CreateMaterializedView": { + "methods": [ + "create_materialized_view" + ] + }, "DeleteAppProfile": { "methods": [ "delete_app_profile" @@ -150,6 +210,16 @@ "delete_instance" ] }, + "DeleteLogicalView": { + "methods": [ + "delete_logical_view" + ] + }, + "DeleteMaterializedView": { + "methods": [ + "delete_materialized_view" + ] + }, "GetAppProfile": { "methods": [ "get_app_profile" @@ -170,6 +240,16 @@ "get_instance" ] }, + "GetLogicalView": { + "methods": [ + "get_logical_view" + ] + }, + "GetMaterializedView": { + "methods": [ + "get_materialized_view" + ] + }, "ListAppProfiles": { "methods": [ "list_app_profiles" @@ -190,6 +270,16 @@ "list_instances" ] }, + "ListLogicalViews": { + "methods": [ + "list_logical_views" + ] + }, + "ListMaterializedViews": { + "methods": [ + "list_materialized_views" + ] + }, "PartialUpdateCluster": { "methods": [ "partial_update_cluster" @@ -224,6 +314,16 @@ "methods": [ "update_instance" ] + }, + "UpdateLogicalView": { + "methods": [ + "update_logical_view" + ] + }, + "UpdateMaterializedView": { + "methods": [ + "update_materialized_view" + ] } } }, @@ -245,6 +345,16 @@ "create_instance" ] }, + "CreateLogicalView": { + "methods": [ + "create_logical_view" + ] + }, + "CreateMaterializedView": { + "methods": [ + "create_materialized_view" + ] + }, "DeleteAppProfile": { "methods": [ "delete_app_profile" @@ -260,6 +370,16 @@ "delete_instance" ] }, + "DeleteLogicalView": { + "methods": [ + "delete_logical_view" + ] + }, + "DeleteMaterializedView": { + "methods": [ + 
"delete_materialized_view" + ] + }, "GetAppProfile": { "methods": [ "get_app_profile" @@ -280,6 +400,16 @@ "get_instance" ] }, + "GetLogicalView": { + "methods": [ + "get_logical_view" + ] + }, + "GetMaterializedView": { + "methods": [ + "get_materialized_view" + ] + }, "ListAppProfiles": { "methods": [ "list_app_profiles" @@ -300,6 +430,16 @@ "list_instances" ] }, + "ListLogicalViews": { + "methods": [ + "list_logical_views" + ] + }, + "ListMaterializedViews": { + "methods": [ + "list_materialized_views" + ] + }, "PartialUpdateCluster": { "methods": [ "partial_update_cluster" @@ -334,6 +474,16 @@ "methods": [ "update_instance" ] + }, + "UpdateLogicalView": { + "methods": [ + "update_logical_view" + ] + }, + "UpdateMaterializedView": { + "methods": [ + "update_materialized_view" + ] } } } @@ -342,7 +492,7 @@ "BigtableTableAdmin": { "clients": { "grpc": { - "libraryClient": "BigtableTableAdminClient", + "libraryClient": "BaseBigtableTableAdminClient", "rpcs": { "CheckConsistency": { "methods": [ @@ -354,11 +504,21 @@ "copy_backup" ] }, + "CreateAuthorizedView": { + "methods": [ + "create_authorized_view" + ] + }, "CreateBackup": { "methods": [ "create_backup" ] }, + "CreateSchemaBundle": { + "methods": [ + "create_schema_bundle" + ] + }, "CreateTable": { "methods": [ "create_table" @@ -369,11 +529,21 @@ "create_table_from_snapshot" ] }, + "DeleteAuthorizedView": { + "methods": [ + "delete_authorized_view" + ] + }, "DeleteBackup": { "methods": [ "delete_backup" ] }, + "DeleteSchemaBundle": { + "methods": [ + "delete_schema_bundle" + ] + }, "DeleteSnapshot": { "methods": [ "delete_snapshot" @@ -394,6 +564,11 @@ "generate_consistency_token" ] }, + "GetAuthorizedView": { + "methods": [ + "get_authorized_view" + ] + }, "GetBackup": { "methods": [ "get_backup" @@ -404,6 +579,11 @@ "get_iam_policy" ] }, + "GetSchemaBundle": { + "methods": [ + "get_schema_bundle" + ] + }, "GetSnapshot": { "methods": [ "get_snapshot" @@ -414,11 +594,21 @@ "get_table" ] }, + "ListAuthorizedViews": { + "methods": [ + "list_authorized_views" + ] + }, "ListBackups": { "methods": [ "list_backups" ] }, + "ListSchemaBundles": { + "methods": [ + "list_schema_bundles" + ] + }, "ListSnapshots": { "methods": [ "list_snapshots" @@ -436,7 +626,7 @@ }, "RestoreTable": { "methods": [ - "restore_table" + "_restore_table" ] }, "SetIamPolicy": { @@ -459,11 +649,21 @@ "undelete_table" ] }, + "UpdateAuthorizedView": { + "methods": [ + "update_authorized_view" + ] + }, "UpdateBackup": { "methods": [ "update_backup" ] }, + "UpdateSchemaBundle": { + "methods": [ + "update_schema_bundle" + ] + }, "UpdateTable": { "methods": [ "update_table" @@ -472,7 +672,7 @@ } }, "grpc-async": { - "libraryClient": "BigtableTableAdminAsyncClient", + "libraryClient": "BaseBigtableTableAdminAsyncClient", "rpcs": { "CheckConsistency": { "methods": [ @@ -484,11 +684,21 @@ "copy_backup" ] }, + "CreateAuthorizedView": { + "methods": [ + "create_authorized_view" + ] + }, "CreateBackup": { "methods": [ "create_backup" ] }, + "CreateSchemaBundle": { + "methods": [ + "create_schema_bundle" + ] + }, "CreateTable": { "methods": [ "create_table" @@ -499,11 +709,21 @@ "create_table_from_snapshot" ] }, + "DeleteAuthorizedView": { + "methods": [ + "delete_authorized_view" + ] + }, "DeleteBackup": { "methods": [ "delete_backup" ] }, + "DeleteSchemaBundle": { + "methods": [ + "delete_schema_bundle" + ] + }, "DeleteSnapshot": { "methods": [ "delete_snapshot" @@ -524,6 +744,11 @@ "generate_consistency_token" ] }, + "GetAuthorizedView": { + "methods": [ + 
"get_authorized_view" + ] + }, "GetBackup": { "methods": [ "get_backup" @@ -534,6 +759,11 @@ "get_iam_policy" ] }, + "GetSchemaBundle": { + "methods": [ + "get_schema_bundle" + ] + }, "GetSnapshot": { "methods": [ "get_snapshot" @@ -544,11 +774,21 @@ "get_table" ] }, + "ListAuthorizedViews": { + "methods": [ + "list_authorized_views" + ] + }, "ListBackups": { "methods": [ "list_backups" ] }, + "ListSchemaBundles": { + "methods": [ + "list_schema_bundles" + ] + }, "ListSnapshots": { "methods": [ "list_snapshots" @@ -566,7 +806,7 @@ }, "RestoreTable": { "methods": [ - "restore_table" + "_restore_table" ] }, "SetIamPolicy": { @@ -589,11 +829,21 @@ "undelete_table" ] }, + "UpdateAuthorizedView": { + "methods": [ + "update_authorized_view" + ] + }, "UpdateBackup": { "methods": [ "update_backup" ] }, + "UpdateSchemaBundle": { + "methods": [ + "update_schema_bundle" + ] + }, "UpdateTable": { "methods": [ "update_table" @@ -602,7 +852,7 @@ } }, "rest": { - "libraryClient": "BigtableTableAdminClient", + "libraryClient": "BaseBigtableTableAdminClient", "rpcs": { "CheckConsistency": { "methods": [ @@ -614,11 +864,21 @@ "copy_backup" ] }, + "CreateAuthorizedView": { + "methods": [ + "create_authorized_view" + ] + }, "CreateBackup": { "methods": [ "create_backup" ] }, + "CreateSchemaBundle": { + "methods": [ + "create_schema_bundle" + ] + }, "CreateTable": { "methods": [ "create_table" @@ -629,11 +889,21 @@ "create_table_from_snapshot" ] }, + "DeleteAuthorizedView": { + "methods": [ + "delete_authorized_view" + ] + }, "DeleteBackup": { "methods": [ "delete_backup" ] }, + "DeleteSchemaBundle": { + "methods": [ + "delete_schema_bundle" + ] + }, "DeleteSnapshot": { "methods": [ "delete_snapshot" @@ -654,6 +924,11 @@ "generate_consistency_token" ] }, + "GetAuthorizedView": { + "methods": [ + "get_authorized_view" + ] + }, "GetBackup": { "methods": [ "get_backup" @@ -664,6 +939,11 @@ "get_iam_policy" ] }, + "GetSchemaBundle": { + "methods": [ + "get_schema_bundle" + ] + }, "GetSnapshot": { "methods": [ "get_snapshot" @@ -674,11 +954,21 @@ "get_table" ] }, + "ListAuthorizedViews": { + "methods": [ + "list_authorized_views" + ] + }, "ListBackups": { "methods": [ "list_backups" ] }, + "ListSchemaBundles": { + "methods": [ + "list_schema_bundles" + ] + }, "ListSnapshots": { "methods": [ "list_snapshots" @@ -696,7 +986,7 @@ }, "RestoreTable": { "methods": [ - "restore_table" + "_restore_table" ] }, "SetIamPolicy": { @@ -719,11 +1009,21 @@ "undelete_table" ] }, + "UpdateAuthorizedView": { + "methods": [ + "update_authorized_view" + ] + }, "UpdateBackup": { "methods": [ "update_backup" ] }, + "UpdateSchemaBundle": { + "methods": [ + "update_schema_bundle" + ] + }, "UpdateTable": { "methods": [ "update_table" diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py index e546bae05..6d72a226d 100644 --- a/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/google/cloud/bigtable_admin_v2/gapic_version.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.21.0" # {x-release-please-version} +__version__ = "2.35.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/overlay/__init__.py b/google/cloud/bigtable_admin_v2/overlay/__init__.py new file mode 100644 index 000000000..f66c7f8dd --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/__init__.py @@ -0,0 +1,49 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This directory and all its subdirectories are the only handwritten +# components of the otherwise autogenerated google/cloud/bigtable/admin_v2. +# The purpose of the overlay directory is to add additional functionality to +# the autogenerated library while preserving its developer experience. These +# handwritten additions currently consist of the following: +# +# 1. TODO: Document final GcRule design choice here +# 2. An LRO class for restore_table that exposes an Operation for +# OptimizeRestoreTable, if that LRO exists. +# 3. New methods (wait_for_consistency and wait_for_replication) that return +# a polling future class for automatically polling check_consistency. +# +# This directory is structured to mirror that of a typical autogenerated library (e.g. +# services/types subdirectories), and the aforementioned handwritten additions are +# currently implemented as either types under overlay/types or in methods in an overwritten +# client class under overlay/services. + +from .types import ( + AsyncRestoreTableOperation, + RestoreTableOperation, + WaitForConsistencyRequest, +) + +from .services.bigtable_table_admin import ( + BigtableTableAdminAsyncClient, + BigtableTableAdminClient, +) + +__all__ = ( + "AsyncRestoreTableOperation", + "RestoreTableOperation", + "BigtableTableAdminAsyncClient", + "BigtableTableAdminClient", + "WaitForConsistencyRequest", +) diff --git a/google/cloud/bigtable_admin_v2/overlay/services/__init__.py b/google/cloud/bigtable_admin_v2/overlay/services/__init__.py new file mode 100644 index 000000000..ab7686e26 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/services/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
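To make the handwritten additions enumerated in the overlay's module comment above concrete, here is a minimal sketch of the resulting client surface (illustrative only; resource names are placeholders, and the synchronous call shapes are assumed to mirror the async docstrings shown later in this diff):

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    # The overlay exports BigtableTableAdminClient; the generated GAPIC
    # client is now named BaseBigtableTableAdminClient.
    client = bigtable_admin_v2.BigtableTableAdminClient()

    # restore_table returns the overlay's RestoreTableOperation wrapper.
    operation = client.restore_table(
        request={
            "parent": "projects/p/instances/i",
            "table_id": "restored-table",
            "backup": "projects/p/instances/i/clusters/c/backups/b",
        }
    )
    table = operation.result()

    # The wrapper exposes the follow-up OptimizeRestoreTable LRO, if the
    # backend created one.
    optimize = operation.optimize_restore_table_operation()
    if optimize is not None:
        optimize.result()

    # wait_for_consistency polls check_consistency under the hood.
    client.wait_for_consistency(name=table.name)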
diff --git a/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py new file mode 100644 index 000000000..f80e3234f --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: Add the async client after owlbot changes. + +from .async_client import BigtableTableAdminAsyncClient +from .client import BigtableTableAdminClient + +__all__ = ( + "BigtableTableAdminAsyncClient", + "BigtableTableAdminClient", +) diff --git a/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py new file mode 100644 index 000000000..ee8e5757d --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py @@ -0,0 +1,375 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import copy
+import functools
+
+from typing import Callable, Optional, Sequence, Tuple, Union
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+
+try:
+    OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.AsyncRetry, object, None]  # type: ignore
+
+from google.api_core import client_options as client_options_lib
+from google.auth import credentials as ga_credentials  # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
+    async_client as base_client,
+)
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import (
+    BigtableTableAdminTransport,
+)
+from google.cloud.bigtable_admin_v2.overlay.types import (
+    async_consistency,
+    async_restore_table,
+    wait_for_consistency_request,
+)
+
+from google.cloud.bigtable.gapic_version import __version__ as bigtable_version
+
+
+DEFAULT_CLIENT_INFO = copy.copy(base_client.DEFAULT_CLIENT_INFO)
+DEFAULT_CLIENT_INFO.client_library_version = f"{bigtable_version}-admin-overlay-async"
+
+
+class BigtableTableAdminAsyncClient(base_client.BaseBigtableTableAdminAsyncClient):
+    def __init__(
+        self,
+        *,
+        credentials: Optional[ga_credentials.Credentials] = None,
+        transport: Optional[
+            Union[
+                str,
+                BigtableTableAdminTransport,
+                Callable[..., BigtableTableAdminTransport],
+            ]
+        ] = "grpc_asyncio",
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiates the Bigtable table admin async client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]):
+                The transport to use, or a Callable that constructs and returns a new transport to use.
+                If a Callable is given, it will be called with the same set of initialization
+                arguments as used in the BigtableTableAdminTransport constructor.
+                If set to None, a transport is chosen automatically.
+            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+                Custom options for the client.
+
+                1. The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client when ``transport`` is
+                not explicitly provided. Only if this property is not set and
+                ``transport`` was not explicitly provided, the endpoint is
+                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+                variable, which can have one of the following values:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto-switch to the
+                default mTLS endpoint if client certificate is present; this is
+                the default value).
+
+                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide a client certificate for mTLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
The ``universe_domain`` property can be used to override the +                default "googleapis.com" universe. Note that the ``api_endpoint`` +                property still takes precedence; and ``universe_domain`` is +                currently not supported for mTLS. + +            client_info (google.api_core.gapic_v1.client_info.ClientInfo): +                The client info used to send a user-agent string along with +                API requests. If ``None``, then default info will be used. +                Generally, you only need to set this if you're developing +                your own client library. + +        Raises: +            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport +                creation failed for any reason. +        """ +        super(BigtableTableAdminAsyncClient, self).__init__( +            credentials=credentials, +            transport=transport, +            client_options=client_options, +            client_info=client_info, +        ) + +    async def restore_table( +        self, +        request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, +        *, +        retry: OptionalRetry = gapic_v1.method.DEFAULT, +        timeout: Union[float, object] = gapic_v1.method.DEFAULT, +        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), +    ) -> async_restore_table.AsyncRestoreTableOperation: +        r"""Create a new table by restoring from a completed backup. The +        returned table :class:`long-running operation +        ` +        can be used to track the progress of the operation, and to cancel it. The +        :attr:`metadata ` field type is +        :class:`RestoreTableMetadata `. +        The :meth:`response ` type is +        :class:`google.cloud.bigtable_admin_v2.types.Table`, if successful. + +        Additionally, the returned :class:`long-running-operation ` +        provides a method, :meth:`google.cloud.bigtable_admin_v2.overlay.types.async_restore_table.AsyncRestoreTableOperation.optimize_restored_table_operation`, which +        gives access to a :class:`google.api_core.operation_async.AsyncOperation` object representing the OptimizeRestoredTable long-running-operation +        after the current one has completed. + +        .. code-block:: python + +            # This snippet should be regarded as a code template only. +            # +            # It will require modifications to work: +            # - It may require correct/in-range values for request initialization. +            # - It may require specifying regional endpoints when creating the service +            #   client as shown in: +            #   https://googleapis.dev/python/google-api-core/latest/client_options.html +            from google.cloud.bigtable import admin_v2 + +            async def sample_restore_table(): +                # Create a client +                client = admin_v2.BigtableTableAdminAsyncClient() + +                # Initialize request argument(s) +                request = admin_v2.RestoreTableRequest( +                    backup="backup_value", +                    parent="parent_value", +                    table_id="table_id_value", +                ) + +                # Make the request +                operation = await client.restore_table(request=request) + +                print("Waiting for operation to complete...") + +                response = await operation.result() + +                # Handle the response +                print(response) + +                # Handle the follow-up OptimizeRestoredTable operation +                optimize_operation = await operation.optimize_restored_table_operation() + +                if optimize_operation: +                    print("Waiting for table optimization to complete...") + +                    response = await optimize_operation.result() + +        Args: +            request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): +                The request object. The request for +                [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, +                should be retried. +            timeout (float): The timeout for this request. +            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be +                sent along with the request as metadata.
Normally, each value must be of type `str`, +                but for metadata keys ending with the suffix `-bin`, the corresponding values must +                be of type `bytes`. + +        Returns: +            google.cloud.bigtable_admin_v2.overlay.types.async_restore_table.AsyncRestoreTableOperation: +                An object representing a long-running operation. + +                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table`, a collection of user data indexed by row, column, and timestamp. +                Each table is served using the resources of its +                parent cluster. +        """ +        operation = await self._restore_table( +            request=request, +            retry=retry, +            timeout=timeout, +            metadata=metadata, +        ) + +        restore_table_operation = async_restore_table.AsyncRestoreTableOperation( +            self._client._transport.operations_client, operation +        ) +        return restore_table_operation + +    async def wait_for_consistency( +        self, +        request: Optional[ +            Union[wait_for_consistency_request.WaitForConsistencyRequest, dict] +        ] = None, +        *, +        name: Optional[str] = None, +        retry: OptionalRetry = gapic_v1.method.DEFAULT, +        timeout: Union[float, object] = gapic_v1.method.DEFAULT, +        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), +    ) -> bool: +        r"""Blocks until either the mutations made to the specified Table before this +        call have been replicated, or until reads using an app profile with `DataBoostIsolationReadOnly` +        can see all writes committed before the token was created, depending on the mode +        set in the request. This is done by generating +        a consistency token for the Table, then polling :meth:`check_consistency` +        for the specified table until the call returns True. + +        .. code-block:: python + +            # This snippet should be regarded as a code template only. +            # It will require modifications to work: +            # - It may require correct/in-range values for request initialization. +            # - It may require specifying regional endpoints when creating the service +            #   client as shown in: +            #   https://googleapis.dev/python/google-api-core/latest/client_options.html +            from google.cloud.bigtable import admin_v2 + +            async def sample_wait_for_consistency(): +                # Create a client +                client = admin_v2.BigtableTableAdminAsyncClient() + +                # Initialize request argument(s) +                request = admin_v2.WaitForConsistencyRequest( +                    name="name_value", +                ) + +                # Make the request +                print("Waiting for operation to complete...") + +                response = await client.wait_for_consistency(request=request) + +                # Handle the response +                print(response) + +        Args: +            request (Union[google.cloud.bigtable_admin_v2.overlay.types.WaitForConsistencyRequest, dict]): +                The request object. +            name (str): +                Required. The unique name of the Table for which to +                create a consistency token. Values are of the form +                ``projects/{project}/instances/{instance}/tables/{table}``. + +                This corresponds to the ``name`` field +                on the ``request`` instance; if ``request`` is provided, this +                should not be set. +            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, +                should be retried. +            timeout (float): The timeout for this request. +            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be +                sent along with the request as metadata. Normally, each value must be of type `str`, +                but for metadata keys ending with the suffix `-bin`, the corresponding values must +                be of type `bytes`. + +        Returns: +            bool: +                If the `standard_read_remote_writes` mode is specified in the request object, returns +                `True` after the mutations of the specified table have been fully replicated.
If the + `data_boost_read_local_writes` mode is specified in the request object, returns `True` + after reads using an app profile with `DataBoostIsolationReadOnly` can see all writes + committed before the token was created. + + Raises: + google.api_core.GoogleAPICallError: If the operation errors or if + the timeout is reached before the operation completes. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, wait_for_consistency_request.WaitForConsistencyRequest + ): + request = wait_for_consistency_request.WaitForConsistencyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Generate the consistency token. + generate_consistency_token_request = ( + bigtable_table_admin.GenerateConsistencyTokenRequest( + name=request.name, + ) + ) + + generate_consistency_response = await self.generate_consistency_token( + generate_consistency_token_request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Create the CheckConsistencyRequest object. + check_consistency_request = bigtable_table_admin.CheckConsistencyRequest( + name=request.name, + consistency_token=generate_consistency_response.consistency_token, + ) + + # Since the default values of StandardReadRemoteWrites and DataBoostReadLocalWrites evaluate to + # False in proto plus, we cannot do a simple "if request.standard_read_remote_writes" to check + # whether or not that field is defined in the original request object. + mode_oneof_field = request._pb.WhichOneof("mode") + if mode_oneof_field: + setattr( + check_consistency_request, + mode_oneof_field, + getattr(request, mode_oneof_field), + ) + + check_consistency_call = functools.partial( + self.check_consistency, + check_consistency_request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Block and wait until the polling harness returns True. + check_consistency_future = ( + async_consistency._AsyncCheckConsistencyPollingFuture( + check_consistency_call + ) + ) + return await check_consistency_future.result() diff --git a/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py new file mode 100644 index 000000000..1b6770b10 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py @@ -0,0 +1,373 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import functools + +from typing import Callable, Optional, Sequence, Tuple, Union +from google.api_core import gapic_v1 +from google.api_core import retry as retries + +try: +    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError:  # pragma: NO COVER +    OptionalRetry = Union[retries.Retry, object, None]  # type: ignore + +from google.api_core import client_options as client_options_lib +from google.auth import credentials as ga_credentials  # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( +    client as base_client, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import ( +    BigtableTableAdminTransport, +) +from google.cloud.bigtable_admin_v2.overlay.types import ( +    consistency, +    restore_table, +    wait_for_consistency_request, +) + +from google.cloud.bigtable.gapic_version import __version__ as bigtable_version + + +DEFAULT_CLIENT_INFO = copy.copy(base_client.DEFAULT_CLIENT_INFO) +DEFAULT_CLIENT_INFO.client_library_version = f"{bigtable_version}-admin-overlay" + + +class BigtableTableAdminClient(base_client.BaseBigtableTableAdminClient): +    def __init__( +        self, +        *, +        credentials: Optional[ga_credentials.Credentials] = None, +        transport: Optional[ +            Union[ +                str, +                BigtableTableAdminTransport, +                Callable[..., BigtableTableAdminTransport], +            ] +        ] = None, +        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, +        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, +    ) -> None: +        """Instantiates the Bigtable table admin client. + +        Args: +            credentials (Optional[google.auth.credentials.Credentials]): The +                authorization credentials to attach to requests. These +                credentials identify the application to the service; if none +                are specified, the client will attempt to ascertain the +                credentials from the environment. +            transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): +                The transport to use, or a Callable that constructs and returns a new transport. +                If a Callable is given, it will be called with the same set of initialization +                arguments as used in the BigtableTableAdminTransport constructor. +                If set to None, a transport is chosen automatically. +            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): +                Custom options for the client. + +                1. The ``api_endpoint`` property can be used to override the +                default endpoint provided by the client when ``transport`` is +                not explicitly provided.
Only if this property is not set and +                ``transport`` was not explicitly provided, the endpoint is +                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment +                variable, which has one of the following values: +                "always" (always use the default mTLS endpoint), "never" (always +                use the default regular endpoint) and "auto" (auto-switch to the +                default mTLS endpoint if client certificate is present; this is +                the default value). + +                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable +                is "true", then the ``client_cert_source`` property can be used +                to provide a client certificate for mTLS transport. If +                not provided, the default SSL client certificate will be used if +                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not +                set, no client certificate will be used. + +                3. The ``universe_domain`` property can be used to override the +                default "googleapis.com" universe. Note that the ``api_endpoint`` +                property still takes precedence; and ``universe_domain`` is +                currently not supported for mTLS. + +            client_info (google.api_core.gapic_v1.client_info.ClientInfo): +                The client info used to send a user-agent string along with +                API requests. If ``None``, then default info will be used. +                Generally, you only need to set this if you're developing +                your own client library. + +        Raises: +            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport +                creation failed for any reason. +        """ +        super(BigtableTableAdminClient, self).__init__( +            credentials=credentials, +            transport=transport, +            client_options=client_options, +            client_info=client_info, +        ) + +    def restore_table( +        self, +        request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, +        *, +        retry: OptionalRetry = gapic_v1.method.DEFAULT, +        timeout: Union[float, object] = gapic_v1.method.DEFAULT, +        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), +    ) -> restore_table.RestoreTableOperation: +        r"""Create a new table by restoring from a completed backup. The +        returned table :class:`long-running operation +        ` +        can be used to track the progress of the operation, and to cancel it. The +        :attr:`metadata ` field type is +        :class:`RestoreTableMetadata `. +        The :meth:`response ` type is +        :class:`google.cloud.bigtable_admin_v2.types.Table`, if successful. + +        Additionally, the returned :class:`long-running-operation ` +        provides a method, :meth:`google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation.optimize_restored_table_operation`, which +        gives access to a :class:`google.api_core.operation.Operation` object representing the OptimizeRestoredTable long-running-operation +        after the current one has completed. + +        .. code-block:: python + +            # This snippet should be regarded as a code template only. +            # +            # It will require modifications to work: +            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service +            #   client as shown in: +            #   https://googleapis.dev/python/google-api-core/latest/client_options.html +            from google.cloud.bigtable import admin_v2 + +            def sample_restore_table(): +                # Create a client +                client = admin_v2.BigtableTableAdminClient() + +                # Initialize request argument(s) +                request = admin_v2.RestoreTableRequest( +                    backup="backup_value", +                    parent="parent_value", +                    table_id="table_id_value", +                ) + +                # Make the request +                operation = client.restore_table(request=request) + +                print("Waiting for operation to complete...") + +                response = operation.result() + +                # Handle the response +                print(response) + +                # Handle the follow-up OptimizeRestoredTable operation +                optimize_operation = operation.optimize_restored_table_operation() + +                if optimize_operation: +                    print("Waiting for table optimization to complete...") + +                    response = optimize_operation.result() + +        Args: +            request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): +                The request object. The request for +                [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +            retry (google.api_core.retry.Retry): Designation of what errors, if any, +                should be retried. +            timeout (float): The timeout for this request. +            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be +                sent along with the request as metadata. Normally, each value must be of type `str`, +                but for metadata keys ending with the suffix `-bin`, the corresponding values must +                be of type `bytes`. + +        Returns: +            google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation: +                An object representing a long-running operation. + +                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table`, a collection of user data indexed by row, column, and timestamp. +                Each table is served using the resources of its +                parent cluster. +        """ +        operation = self._restore_table( +            request=request, +            retry=retry, +            timeout=timeout, +            metadata=metadata, +        ) + +        restore_table_operation = restore_table.RestoreTableOperation( +            self._transport.operations_client, operation +        ) +        return restore_table_operation + +    def wait_for_consistency( +        self, +        request: Optional[ +            Union[wait_for_consistency_request.WaitForConsistencyRequest, dict] +        ] = None, +        *, +        name: Optional[str] = None, +        retry: OptionalRetry = gapic_v1.method.DEFAULT, +        timeout: Union[float, object] = gapic_v1.method.DEFAULT, +        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), +    ) -> bool: +        r"""Blocks until either the mutations made to the specified Table before this +        call have been replicated, or until reads using an app profile with `DataBoostIsolationReadOnly` +        can see all writes committed before the token was created, depending on the mode +        set in the request. This is done by generating +        a consistency token for the Table, then polling :meth:`check_consistency` +        for the specified table until the call returns True. + +        .. code-block:: python + +            # This snippet should be regarded as a code template only. +            # It will require modifications to work: +            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service +            #   client as shown in: +            #   https://googleapis.dev/python/google-api-core/latest/client_options.html +            from google.cloud.bigtable import admin_v2 + +            def sample_wait_for_consistency(): +                # Create a client +                client = admin_v2.BigtableTableAdminClient() + +                # Initialize request argument(s) +                request = admin_v2.WaitForConsistencyRequest( +                    name="name_value", +                ) + +                # Make the request +                print("Waiting for operation to complete...") + +                response = client.wait_for_consistency(request=request) + +                # Handle the response +                print(response) + +        Args: +            request (Union[google.cloud.bigtable_admin_v2.overlay.types.WaitForConsistencyRequest, dict]): +                The request object. +            name (str): +                Required. The unique name of the Table for which to +                create a consistency token. Values are of the form +                ``projects/{project}/instances/{instance}/tables/{table}``. + +                This corresponds to the ``name`` field +                on the ``request`` instance; if ``request`` is provided, this +                should not be set. +            retry (google.api_core.retry.Retry): Designation of what errors, if any, +                should be retried. +            timeout (float): The timeout for this request. +            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be +                sent along with the request as metadata. Normally, each value must be of type `str`, +                but for metadata keys ending with the suffix `-bin`, the corresponding values must +                be of type `bytes`. + +        Returns: +            bool: +                If the `standard_read_remote_writes` mode is specified in the request object, returns +                `True` after the mutations of the specified table have been fully replicated. If the +                `data_boost_read_local_writes` mode is specified in the request object, returns `True` +                after reads using an app profile with `DataBoostIsolationReadOnly` can see all writes +                committed before the token was created. + +        Raises: +            google.api_core.GoogleAPICallError: If the operation errors or if +                the timeout is reached before the operation completes. +        """ +        # Create or coerce a protobuf request object. +        # - Quick check: If we got a request object, we should *not* have +        #   gotten any keyword arguments that map to the request. +        flattened_params = [name] +        has_flattened_params = ( +            len([param for param in flattened_params if param is not None]) > 0 +        ) +        if request is not None and has_flattened_params: +            raise ValueError( +                "If the `request` argument is set, then none of " +                "the individual field arguments should be set." +            ) + +        # - Use the request object if provided (there's no risk of modifying the input as +        #   there are no flattened fields), or create one. +        if not isinstance( +            request, wait_for_consistency_request.WaitForConsistencyRequest +        ): +            request = wait_for_consistency_request.WaitForConsistencyRequest(request) +            # If we have keyword arguments corresponding to fields on the +            # request, apply these. +            if name is not None: +                request.name = name + +        # Generate the consistency token. +        generate_consistency_token_request = ( +            bigtable_table_admin.GenerateConsistencyTokenRequest( +                name=request.name, +            ) +        ) + +        generate_consistency_response = self.generate_consistency_token( +            generate_consistency_token_request, +            retry=retry, +            timeout=timeout, +            metadata=metadata, +        ) + +        # Create the CheckConsistencyRequest object.
+ check_consistency_request = bigtable_table_admin.CheckConsistencyRequest( + name=request.name, + consistency_token=generate_consistency_response.consistency_token, + ) + + # Since the default values of StandardReadRemoteWrites and DataBoostReadLocalWrites evaluate to + # False in proto plus, we cannot do a simple "if request.standard_read_remote_writes" to check + # whether or not that field is defined in the original request object. + mode_oneof_field = request._pb.WhichOneof("mode") + if mode_oneof_field: + setattr( + check_consistency_request, + mode_oneof_field, + getattr(request, mode_oneof_field), + ) + + check_consistency_call = functools.partial( + self.check_consistency, + check_consistency_request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Block and wait until the polling harness returns True. + check_consistency_future = consistency._CheckConsistencyPollingFuture( + check_consistency_call + ) + return check_consistency_future.result() diff --git a/google/cloud/bigtable_admin_v2/overlay/types/__init__.py b/google/cloud/bigtable_admin_v2/overlay/types/__init__.py new file mode 100644 index 000000000..16b032ac4 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/types/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .async_restore_table import ( + AsyncRestoreTableOperation, +) + +from .restore_table import ( + RestoreTableOperation, +) + +from .wait_for_consistency_request import ( + WaitForConsistencyRequest, +) + +__all__ = ( + "AsyncRestoreTableOperation", + "RestoreTableOperation", + "WaitForConsistencyRequest", +) diff --git a/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py b/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py new file mode 100644 index 000000000..0703940d5 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py @@ -0,0 +1,104 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
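Both `wait_for_consistency` implementations reduce to the same token-then-poll flow that the consistency futures below encapsulate. A stripped-down sketch of that control flow (a plain loop stands in for the retry-driven polling future; `admin` is a `BigtableTableAdminClient`, and the helper name is hypothetical):

```python
# Sketch of the token-then-poll flow behind wait_for_consistency.
# The real implementation delegates backoff and deadlines to a polling
# future; this loop is a simplification for illustration only.
import time

def wait_for_consistency_sketch(admin, table_name: str, poll_seconds: float = 10.0) -> bool:
    # Generate a consistency token for the table.
    token = admin.generate_consistency_token(
        {"name": table_name}
    ).consistency_token

    # Poll check_consistency until the service reports the table consistent.
    while True:
        response = admin.check_consistency(
            {"name": table_name, "consistency_token": token}
        )
        if response.consistent:
            return True
        time.sleep(poll_seconds)
```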
+ +from typing import Awaitable, Union, Callable + +from google.api_core.future import async_future +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +try: +    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError:  # pragma: NO COVER +    OptionalRetry = Union[retries.Retry, object, None]  # type: ignore + + +# The consistency check could take a very long time, so we wait indefinitely. +DEFAULT_RETRY = async_future.DEFAULT_RETRY.with_timeout(None) + + +class _AsyncCheckConsistencyPollingFuture(async_future.AsyncFuture): +    """A Future that polls an underlying `check_consistency` operation until it returns True. + +    **This class should not be instantiated by users** and should only be instantiated by the admin +    client's +    :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminAsyncClient.wait_for_consistency` +    or +    :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminAsyncClient.wait_for_replication` +    methods. + +    Args: +        check_consistency_call (Callable[[], Awaitable[google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse]]): +            A :meth:`check_consistency +            ` +            call from the admin client, with every user-supplied argument (including +            ``retry``) already bound via :func:`functools.partial`. +        retry (google.api_core.retry.AsyncRetry): The retry configuration used +            when polling. This can be used to control how often :meth:`done` +            is polled. Regardless of the retry's ``deadline``, it will be +            overridden by the ``timeout`` argument to :meth:`result`. +    """ + +    def __init__( +        self, +        check_consistency_call: Callable[ +            [], Awaitable[bigtable_table_admin.CheckConsistencyResponse] +        ], +        retry: retries.AsyncRetry = DEFAULT_RETRY, +        **kwargs +    ): +        super(_AsyncCheckConsistencyPollingFuture, self).__init__(retry=retry, **kwargs) + +        # The client binds every argument of the check_consistency call +        # (including retry) via functools.partial, so the call takes no +        # arguments here. +        self._check_consistency_call = check_consistency_call + +    async def done(self, retry: OptionalRetry = None): +        """Polls the underlying `check_consistency` call to see if the future is complete. + +        Args: +            retry (google.api_core.retry.Retry): (Optional) How to retry the +                polling RPC (not to be confused with the polling configuration. See +                the documentation for :meth:`result ` +                for details). + +        Returns: +            bool: True if the future is complete, False otherwise.
+ """ + if self._future.done(): + return True + + try: + check_consistency_response = await self._check_consistency_call() + if check_consistency_response.consistent: + self.set_result(True) + + return check_consistency_response.consistent + except Exception as e: + self.set_exception(e) + + def cancel(self): + raise NotImplementedError("Cannot cancel consistency token operation") + + def cancelled(self): + raise NotImplementedError("Cannot cancel consistency token operation") diff --git a/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py b/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py new file mode 100644 index 000000000..9edfb4963 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py @@ -0,0 +1,99 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +from google.api_core import exceptions +from google.api_core import operation_async +from google.protobuf import empty_pb2 + +from google.cloud.bigtable_admin_v2.types import OptimizeRestoredTableMetadata + + +class AsyncRestoreTableOperation(operation_async.AsyncOperation): + """A Future for interacting with Bigtable Admin's RestoreTable Long-Running Operation. + + This is needed to expose a potential long-running operation that might run after this operation + finishes, OptimizeRestoreTable. This is exposed via the the :meth:`optimize_restore_table_operation` + method. + + **This class should not be instantiated by users** and should only be instantiated by the admin + client's :meth:`restore_table + ` + method. + + Args: + operations_client (google.api_core.operations_v1.AbstractOperationsClient): The operations + client from the admin client class's transport. + restore_table_operation (google.api_core.operation_async.AsyncOperation): A + :class:`google.api_core.operation_async.AsyncOperation` + instance resembling a RestoreTable long-running operation + """ + + def __init__( + self, operations_client, restore_table_operation: operation_async.AsyncOperation + ): + self._operations_client = operations_client + self._optimize_restored_table_operation = None + super().__init__( + restore_table_operation._operation, + restore_table_operation._refresh, + restore_table_operation._cancel, + restore_table_operation._result_type, + restore_table_operation._metadata_type, + retry=restore_table_operation._retry, + ) + + async def optimize_restored_table_operation( + self, + ) -> Optional[operation_async.AsyncOperation]: + """Gets the OptimizeRestoredTable long-running operation that runs after this operation finishes. + The current operation might not trigger a follow-up OptimizeRestoredTable operation, in which case, this + method will return `None`. + This method must not be called before the parent restore_table operation is complete. + Returns: + An object representing a long-running operation, or None if there is no OptimizeRestoredTable operation + after this one. 
+        Raises: +            google.api_core.GoogleAPIError: raised when accessed before the restore_table operation is complete +        """ +        if not await self.done(): +            raise exceptions.GoogleAPIError( +                "optimize_restored_table operation can't be accessed until the restore_table operation is complete" +            ) + +        if self._optimize_restored_table_operation is not None: +            return self._optimize_restored_table_operation + +        operation_name = self.metadata.optimize_table_operation_name + +        # When the RestoreTable operation finishes, it might not necessarily trigger +        # an optimize operation. +        if operation_name: +            gapic_operation = await self._operations_client.get_operation( +                name=operation_name +            ) +            self._optimize_restored_table_operation = operation_async.from_gapic( +                gapic_operation, +                self._operations_client, +                empty_pb2.Empty, +                metadata_type=OptimizeRestoredTableMetadata, +            ) +            return self._optimize_restored_table_operation +        else: +            # no optimize operation found +            return None diff --git a/google/cloud/bigtable_admin_v2/overlay/types/consistency.py b/google/cloud/bigtable_admin_v2/overlay/types/consistency.py new file mode 100644 index 000000000..63a110975 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/types/consistency.py @@ -0,0 +1,101 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#     http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Union, Callable + +from google.api_core.future import polling +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +try: +    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError:  # pragma: NO COVER +    OptionalRetry = Union[retries.Retry, object, None]  # type: ignore + + +# The consistency check could take a very long time, so we wait indefinitely. +DEFAULT_RETRY = polling.DEFAULT_POLLING.with_timeout(None) + + +class _CheckConsistencyPollingFuture(polling.PollingFuture): +    """A Future that polls an underlying `check_consistency` operation until it returns True. + +    **This class should not be instantiated by users** and should only be instantiated by the admin +    client's +    :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminClient.wait_for_consistency` +    or +    :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminClient.wait_for_replication` +    methods. + +    Args: +        check_consistency_call (Callable[[], google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse]): +            A :meth:`check_consistency +            ` +            call from the admin client, with every user-supplied argument already bound +            via :func:`functools.partial`. +        polling (google.api_core.retry.Retry): The configuration used for polling. +            This parameter controls how often :meth:`done` is polled.
If the +            ``timeout`` argument is specified in the :meth:`result +            ` method it will +            override the ``polling.timeout`` property. +    """ + +    def __init__( +        self, +        check_consistency_call: Callable[ +            [], bigtable_table_admin.CheckConsistencyResponse +        ], +        polling: retries.Retry = DEFAULT_RETRY, +        **kwargs +    ): +        super(_CheckConsistencyPollingFuture, self).__init__(polling=polling, **kwargs) + +        # The client binds every argument of the check_consistency call +        # (including retry) via functools.partial, so the call takes no +        # arguments here. +        self._check_consistency_call = check_consistency_call + +    def done(self, retry: OptionalRetry = None): +        """Polls the underlying `check_consistency` call to see if the future is complete. + +        Args: +            retry (google.api_core.retry.Retry): (Optional) How to retry the +                polling RPC (not to be confused with the polling configuration. See +                the documentation for :meth:`result ` +                for details). + +        Returns: +            bool: True if the future is complete, False otherwise. +        """ + +        if self._result_set: +            return True + +        try: +            check_consistency_response = self._check_consistency_call() +            if check_consistency_response.consistent: +                self.set_result(True) + +            return check_consistency_response.consistent +        except Exception as e: +            self.set_exception(e) +            return True  # the future is complete once the exception is set + +    def cancel(self): +        raise NotImplementedError("Cannot cancel consistency token operation") + +    def cancelled(self): +        raise NotImplementedError("Cannot cancel consistency token operation") diff --git a/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py b/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py new file mode 100644 index 000000000..84c9c5d91 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py @@ -0,0 +1,102 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#     http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +from google.api_core import exceptions +from google.api_core import operation +from google.protobuf import empty_pb2 + +from google.cloud.bigtable_admin_v2.types import OptimizeRestoredTableMetadata + + +class RestoreTableOperation(operation.Operation): +    """A Future for interacting with Bigtable Admin's RestoreTable Long-Running Operation. + +    This is needed to expose a potential long-running operation that might run after this operation +    finishes, OptimizeRestoredTable. This is exposed via the :meth:`optimize_restored_table_operation` +    method. + +    **This class should not be instantiated by users** and should only be instantiated by the admin +    client's :meth:`restore_table +    ` +    method. + +    Args: +        operations_client (google.api_core.operations_v1.AbstractOperationsClient): The operations +            client from the admin client class's transport.
+        restore_table_operation (google.api_core.operation.Operation): A :class:`google.api_core.operation.Operation` +            instance representing a RestoreTable long-running operation +    """ + +    def __init__(self, operations_client, restore_table_operation: operation.Operation): +        self._operations_client = operations_client +        self._optimize_restored_table_operation = None +        super().__init__( +            restore_table_operation._operation, +            restore_table_operation._refresh, +            restore_table_operation._cancel, +            restore_table_operation._result_type, +            restore_table_operation._metadata_type, +            polling=restore_table_operation._polling, +        ) + +    def optimize_restored_table_operation(self) -> Optional[operation.Operation]: +        """Gets the OptimizeRestoredTable long-running operation that runs after this operation finishes. + +        This must not be called before the parent restore_table operation is complete. You can guarantee +        this by calling it only after this class's :meth:`google.api_core.operation.Operation.result` +        method has returned. + +        The follow-up operation has +        :attr:`metadata ` type +        :class:`OptimizeRestoredTableMetadata +        ` +        and no return value, but can be waited for with `result`. + +        The current operation might not trigger a follow-up OptimizeRestoredTable operation, in which case +        this method will return `None`. + +        Returns: +            Optional[google.api_core.operation.Operation]: +                An object representing a long-running operation, or None if there is no OptimizeRestoredTable operation +                after this one. + +        Raises: +            google.api_core.GoogleAPIError: raised when accessed before the restore_table operation is complete +        """ +        if not self.done(): +            raise exceptions.GoogleAPIError( +                "optimize_restored_table operation can't be accessed until the restore_table operation is complete" +            ) + +        if self._optimize_restored_table_operation is not None: +            return self._optimize_restored_table_operation + +        operation_name = self.metadata.optimize_table_operation_name + +        # When the RestoreTable operation finishes, it might not necessarily trigger +        # an optimize operation. +        if operation_name: +            gapic_operation = self._operations_client.get_operation(name=operation_name) +            self._optimize_restored_table_operation = operation.from_gapic( +                gapic_operation, +                self._operations_client, +                empty_pb2.Empty, +                metadata_type=OptimizeRestoredTableMetadata, +            ) +            return self._optimize_restored_table_operation +        else: +            # no optimize operation found +            return None diff --git a/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py b/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py new file mode 100644 index 000000000..51070230a --- /dev/null +++ b/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#     http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
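Putting `RestoreTableOperation` together: a hedged sketch of driving the two chained long-running operations (`client` is a `BigtableTableAdminClient`; all request values are placeholders):

```python
# Sketch: waiting on RestoreTable, then on the follow-up
# OptimizeRestoredTable operation if the service started one.
operation = client.restore_table(
    request={
        "parent": "projects/my-project/instances/my-instance",
        "table_id": "restored-table",
        "backup": "projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
    }
)
table = operation.result()  # the accessor below requires a completed parent operation

optimize_operation = operation.optimize_restored_table_operation()
if optimize_operation is not None:
    # The result type is Empty; the metadata is OptimizeRestoredTableMetadata.
    optimize_operation.result()
```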
+# + +import proto + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +__protobuf__ = proto.module( +    package="google.bigtable.admin.v2", +    manifest={ +        "WaitForConsistencyRequest", +    }, +) + + +# The WaitForConsistencyRequest object is not a real proto. It is a wrapper +# class intended for the handwritten method wait_for_consistency. It is +# constructed by extending a Proto Plus message class to get a developer +# experience closest to that of an autogenerated GAPIC method, and to allow +# developers to manipulate the wrapper class like they would a request proto +# for an autogenerated call. +class WaitForConsistencyRequest(proto.Message): +    """Wrapper class for encapsulating parameters for the `wait_for_consistency` method in both +    :class:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client.BigtableTableAdminClient` +    and :class:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.async_client.BigtableTableAdminAsyncClient`. + +    This message has `oneof`_ fields (mutually exclusive fields). +    For each oneof, at most one member field can be set at the same time. +    Setting any member of the oneof automatically clears all other +    members. + +    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + +    Attributes: +        name (str): +            Required. The unique name of the Table for which to check +            replication consistency. Values are of the form +            ``projects/{project}/instances/{instance}/tables/{table}``. +        standard_read_remote_writes (google.cloud.bigtable_admin_v2.types.StandardReadRemoteWrites): +            Checks that reads using an app profile with +            ``StandardIsolation`` can see all writes committed before +            the token was created, even if the read and write target +            different clusters. + +            This field is a member of `oneof`_ ``mode``. +        data_boost_read_local_writes (google.cloud.bigtable_admin_v2.types.DataBoostReadLocalWrites): +            Checks that reads using an app profile with +            ``DataBoostIsolationReadOnly`` can see all writes committed +            before the token was created, but only if the read and write +            target the same cluster. + +            This field is a member of `oneof`_ ``mode``. +    """ + +    name: str = proto.Field(proto.STRING, number=1) +    standard_read_remote_writes: bigtable_table_admin.StandardReadRemoteWrites = ( +        proto.Field( +            proto.MESSAGE, +            number=2, +            oneof="mode", +            message=bigtable_table_admin.StandardReadRemoteWrites, +        ) +    ) +    data_boost_read_local_writes: bigtable_table_admin.DataBoostReadLocalWrites = ( +        proto.Field( +            proto.MESSAGE, +            number=3, +            oneof="mode", +            message=bigtable_table_admin.DataBoostReadLocalWrites, +        ) +    ) diff --git a/google/cloud/bigtable_admin_v2/services/__init__.py b/google/cloud/bigtable_admin_v2/services/__init__.py index 89a37dc92..cbf94b283 100644 --- a/google/cloud/bigtable_admin_v2/services/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
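`WaitForConsistencyRequest` extends `proto.Message` precisely so that its ``mode`` oneof behaves like a generated request. A short sketch of the oneof semantics the clients rely on (unset members read as falsy defaults, hence the ``WhichOneof`` check in `wait_for_consistency`):

```python
# Sketch of the oneof behavior that motivates the WhichOneof("mode") check:
# unset oneof members read as default (falsy) messages, so truthiness cannot
# distinguish "unset" from "set to an empty message".
from google.cloud.bigtable_admin_v2.overlay.types import WaitForConsistencyRequest
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

request = WaitForConsistencyRequest(name="projects/p/instances/i/tables/t")
assert request._pb.WhichOneof("mode") is None  # no mode selected yet

request.standard_read_remote_writes = bigtable_table_admin.StandardReadRemoteWrites()
assert request._pb.WhichOneof("mode") == "standard_read_remote_writes"
```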
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py index 40631d1b4..20ac9e4fc 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 3f67620c0..632496543 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging as std_logging from collections import OrderedDict -import functools import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -33,14 +34,16 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf + try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -57,6 +60,15 @@ from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport from .client import BigtableInstanceAdminClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class BigtableInstanceAdminAsyncClient: """Service for creating, configuring, and deleting Cloud @@ -67,8 +79,12 @@ class BigtableInstanceAdminAsyncClient: _client: BigtableInstanceAdminClient + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BigtableInstanceAdminClient._DEFAULT_UNIVERSE app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path) parse_app_profile_path = staticmethod( @@ -86,6 +102,16 @@ class BigtableInstanceAdminAsyncClient: ) instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) + logical_view_path = staticmethod(BigtableInstanceAdminClient.logical_view_path) + parse_logical_view_path = staticmethod( + BigtableInstanceAdminClient.parse_logical_view_path + ) + materialized_view_path = staticmethod( + BigtableInstanceAdminClient.materialized_view_path + ) + parse_materialized_view_path = staticmethod( + BigtableInstanceAdminClient.parse_materialized_view_path + ) table_path = staticmethod(BigtableInstanceAdminClient.table_path) parse_table_path = staticmethod(BigtableInstanceAdminClient.parse_table_path) common_billing_account_path = staticmethod( @@ -193,20 +219,42 @@ def transport(self) -> BigtableInstanceAdminTransport: """ return self._client.transport - get_transport_class = functools.partial( - type(BigtableInstanceAdminClient).get_transport_class, - type(BigtableInstanceAdminClient), - ) + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = BigtableInstanceAdminClient.get_transport_class def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableInstanceAdminTransport] = "grpc_asyncio", + transport: Optional[ + Union[ + str, + BigtableInstanceAdminTransport, + Callable[..., BigtableInstanceAdminTransport], + ] + ] = "grpc_asyncio", client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable instance admin client. + """Instantiates the bigtable instance admin async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -214,26 +262,43 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.BigtableInstanceAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + transport (Optional[Union[str,BigtableInstanceAdminTransport,Callable[..., BigtableInstanceAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. 
+ If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableInstanceAdminTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. @@ -245,6 +310,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableInstanceAdminAsyncClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "credentialsType": None, + }, + ) + async def create_instance( self, request: Optional[ @@ -257,7 +344,7 @@ async def create_instance( clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Create an instance within a project. 
@@ -267,6 +354,41 @@ async def create_instance( scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + instance=instance, + ) + + # Make the request + operation = client.create_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]]): The request object. Request message for @@ -300,16 +422,17 @@ async def create_instance( ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. This corresponds to the ``clusters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -323,16 +446,22 @@ async def create_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, instance_id, instance, clusters]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, instance_id, instance, clusters] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.CreateInstanceRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): + request = bigtable_instance_admin.CreateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -348,11 +477,9 @@ async def create_instance( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_instance, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -360,6 +487,9 @@ async def create_instance( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -388,10 +518,36 @@ async def get_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Gets information about an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetInstanceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_instance(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]]): The request object. Request message for @@ -404,11 +560,13 @@ async def get_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -420,16 +578,22 @@ async def get_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
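A note on the `any([...])` → explicit `is not None` rewrite that recurs in every method of this file: `any` treats falsy-but-explicitly-set values (empty string, empty mapping, zero) as "not provided", so such values used to slip past the mutual-exclusion guard when combined with `request`. The explicit count only ignores true omissions. A minimal, self-contained sketch of the difference:

```python
# Caller explicitly passed an empty string for a flattened field.
flattened_params = ["", None]

old_check = any(flattened_params)  # False: "" is falsy, so it was ignored
new_check = len([p for p in flattened_params if p is not None]) > 0  # True: "" counts

print(old_check, new_check)  # False True
```

With the new check, passing `request` together with an explicitly set (even falsy) flattened field now raises the `ValueError` shown above, as intended.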
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.GetInstanceRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): + request = bigtable_instance_admin.GetInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -438,21 +602,9 @@ async def get_instance( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_instance, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -460,6 +612,9 @@ async def get_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -480,10 +635,36 @@ async def list_instances( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_instances(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListInstancesRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_instances(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]]): The request object. Request message for @@ -496,11 +677,13 @@ async def list_instances( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListInstancesResponse: @@ -509,16 +692,22 @@ async def list_instances( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.ListInstancesRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): + request = bigtable_instance_admin.ListInstancesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -527,21 +716,9 @@ async def list_instances( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_instances, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_instances + ] # Certain fields should be provided within the metadata header; # add these here. @@ -549,6 +726,9 @@ async def list_instances( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -566,13 +746,39 @@ async def update_instance( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method updates only the display name and type for an Instance. To update other Instance properties, such as labels, use PartialUpdateInstance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Instance( + display_name="display_name_value", + ) + + # Make the request + response = await client.update_instance(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.Instance, dict]]): The request object. A collection of Bigtable @@ -581,11 +787,13 @@ async def update_instance( served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -597,25 +805,16 @@ async def update_instance( """ # Create or coerce a protobuf request object. - request = instance.Instance(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, instance.Instance): + request = instance.Instance(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_instance, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -623,6 +822,9 @@ async def update_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -644,12 +846,45 @@ async def partial_update_instance( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates an instance within a project. This method can modify all fields of an Instance and is the preferred way to update an Instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_partial_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.PartialUpdateInstanceRequest( + instance=instance, + ) + + # Make the request + operation = client.partial_update_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]]): The request object. Request message for @@ -669,11 +904,13 @@ async def partial_update_instance( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -687,16 +924,24 @@ async def partial_update_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([instance, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [instance, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.PartialUpdateInstanceRequest + ): + request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -707,21 +952,9 @@ async def partial_update_instance( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
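The recurring `wrap_method(...)` → `_wrapped_methods[...]` change below swaps per-call wrapping (rebuilding retry/timeout defaults on every request) for a one-time lookup table built on the transport. A rough, self-contained sketch of the pattern, with a toy `wrap` standing in for `gapic_v1.method_async.wrap_method`:

```python
import asyncio
import functools

def wrap(func, default_timeout=None):
    # Toy stand-in for gapic_v1.method_async.wrap_method: bakes the default
    # timeout into the callable once instead of rebuilding it on every call.
    @functools.wraps(func)
    async def wrapped(request, timeout=None):
        return await asyncio.wait_for(func(request), timeout or default_timeout)
    return wrapped

class Transport:
    def __init__(self):
        # One wrapped callable per RPC, keyed by the bound method itself,
        # the same shape as the `_wrapped_methods` lookups in this diff.
        self._wrapped_methods = {
            self.get_instance: wrap(self.get_instance, default_timeout=60.0),
        }

    async def get_instance(self, request):
        return {"name": request}

async def main():
    transport = Transport()
    rpc = transport._wrapped_methods[transport.get_instance]
    print(await rpc("projects/my-project/instances/my-instance"))

asyncio.run(main())
```

Besides avoiding repeated wrapper construction, this keeps the default retry/timeout policy in one place on the transport rather than duplicated inline in every client method.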
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.partial_update_instance, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.partial_update_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -731,6 +964,9 @@ async def partial_update_instance( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -759,10 +995,33 @@ async def delete_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Delete an instance from a project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteInstanceRequest( + name="name_value", + ) + + # Make the request + await client.delete_instance(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]]): The request object. Request message for @@ -775,23 +1034,31 @@ async def delete_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) - request = bigtable_instance_admin.DeleteInstanceRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): + request = bigtable_instance_admin.DeleteInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -800,11 +1067,9 @@ async def delete_instance( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_instance, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -812,6 +1077,9 @@ async def delete_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -831,7 +1099,7 @@ async def create_cluster( cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a cluster within an instance. @@ -841,6 +1109,37 @@ async def create_cluster( scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]]): The request object. Request message for @@ -869,11 +1168,13 @@ async def create_cluster( This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation_async.AsyncOperation: @@ -886,16 +1187,22 @@ async def create_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, cluster_id, cluster]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, cluster_id, cluster] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.CreateClusterRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): + request = bigtable_instance_admin.CreateClusterRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -908,11 +1215,9 @@ async def create_cluster( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_cluster, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -920,6 +1225,9 @@ async def create_cluster( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -948,10 +1256,36 @@ async def get_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_cluster(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]]): The request object. Request message for @@ -964,11 +1298,13 @@ async def get_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
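The `Sequence[Tuple[str, Union[str, bytes]]]` widening applied to every `metadata` parameter in this file (the next hunk shows it for `get_cluster`) follows the gRPC convention that keys ending in `-bin` carry binary values. A hedged illustration; the key and payload here are invented placeholders, not headers this service defines:

```python
# Binary-valued entries require a key with the "-bin" suffix; ordinary
# entries remain plain strings.
metadata = [
    ("x-example-trace-bin", b"\x00\x01\x02"),  # bytes value, "-bin" key
    ("x-example-note", "plain-text-value"),    # str value, ordinary key
]

# e.g. cluster = await client.get_cluster(request=request, metadata=metadata)
```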
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Cluster: @@ -979,16 +1315,22 @@ async def get_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.GetClusterRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetClusterRequest): + request = bigtable_instance_admin.GetClusterRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -997,21 +1339,9 @@ async def get_cluster( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_cluster, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1019,6 +1349,9 @@ async def get_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1039,10 +1372,36 @@ async def list_clusters( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_clusters(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_clusters(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]]): The request object. Request message for @@ -1057,11 +1416,13 @@ async def list_clusters( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListClustersResponse: @@ -1070,16 +1431,22 @@ async def list_clusters( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.ListClustersRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListClustersRequest): + request = bigtable_instance_admin.ListClustersRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1088,21 +1455,9 @@ async def list_clusters( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_clusters, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_clusters + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1110,6 +1465,9 @@ async def list_clusters( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. 
+ self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1127,7 +1485,7 @@ async def update_cluster( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates a cluster within an instance. @@ -1135,17 +1493,48 @@ async def update_cluster( cluster_config.cluster_autoscaling_config. In order to update it, you must use PartialUpdateCluster. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Cluster( + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): The request object. A resizable group of nodes in a particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1158,25 +1547,16 @@ async def update_cluster( """ # Create or coerce a protobuf request object. - request = instance.Cluster(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, instance.Cluster): + request = instance.Cluster(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_cluster, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_cluster + ] # Certain fields should be provided within the metadata header; # add these here. 
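Each method above funnels its routing fields through `gapic_v1.routing_header.to_grpc_metadata`, which packs them into the single `x-goog-request-params` metadata entry that Google frontends use for request routing. A quick illustration (assumes `google-api-core` is installed; the resource name is a placeholder):

```python
from google.api_core import gapic_v1

entry = gapic_v1.routing_header.to_grpc_metadata(
    (("name", "projects/my-project/instances/my-instance"),)
)
# Expect a single ("x-goog-request-params", <url-encoded key=value>) tuple,
# with slashes in the resource name percent-encoded.
print(entry)
```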
@@ -1184,6 +1564,9 @@ async def update_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1213,7 +1596,7 @@ async def partial_update_cluster( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates a cluster within a project. This method is the preferred way to update a Cluster. @@ -1230,6 +1613,35 @@ async def partial_update_cluster( cluster_config.cluster_autoscaling_config, and explicitly set a serve_node count via the update_mask. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_partial_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.PartialUpdateClusterRequest( + ) + + # Make the request + operation = client.partial_update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]]): The request object. Request message for @@ -1248,11 +1660,13 @@ async def partial_update_cluster( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1265,16 +1679,22 @@ async def partial_update_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([cluster, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [cluster, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) - request = bigtable_instance_admin.PartialUpdateClusterRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): + request = bigtable_instance_admin.PartialUpdateClusterRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1285,11 +1705,9 @@ async def partial_update_cluster( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.partial_update_cluster, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.partial_update_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1299,6 +1717,9 @@ async def partial_update_cluster( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1327,10 +1748,33 @@ async def delete_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a cluster from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + await client.delete_cluster(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]]): The request object. Request message for @@ -1343,23 +1787,31 @@ async def delete_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
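The `isinstance` coercion pattern repeated above also changes how an already-constructed proto request is handled: previously it was unconditionally re-wrapped in the request constructor (copying it), whereas now it is passed through untouched, while dicts are still converted. A sketch of the two call styles this supports (the resource name is a placeholder):

```python
from google.cloud import bigtable_admin_v2

name = "projects/my-project/instances/my-instance/clusters/my-cluster"

as_dict = {"name": name}                                      # coerced into the proto type
as_proto = bigtable_admin_v2.DeleteClusterRequest(name=name)  # now used as-is, no copy

# await client.delete_cluster(request=as_dict)
# await client.delete_cluster(request=as_proto)
```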
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.DeleteClusterRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): + request = bigtable_instance_admin.DeleteClusterRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1368,11 +1820,9 @@ async def delete_cluster( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_cluster, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1380,6 +1830,9 @@ async def delete_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1399,10 +1852,41 @@ async def create_app_profile( app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=app_profile, + ) + + # Make the request + response = await client.create_app_profile(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]]): The request object. Request message for @@ -1431,11 +1915,13 @@ async def create_app_profile( This corresponds to the ``app_profile`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1445,16 +1931,22 @@ async def create_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, app_profile_id, app_profile]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, app_profile_id, app_profile] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.CreateAppProfileRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): + request = bigtable_instance_admin.CreateAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1467,11 +1959,9 @@ async def create_app_profile( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_app_profile, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_app_profile + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1479,6 +1969,9 @@ async def create_app_profile( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1499,10 +1992,36 @@ async def get_app_profile( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAppProfileRequest( + name="name_value", + ) + + # Make the request + response = await client.get_app_profile(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]]): The request object. Request message for @@ -1515,11 +2034,13 @@ async def get_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1529,16 +2050,22 @@ async def get_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.GetAppProfileRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): + request = bigtable_instance_admin.GetAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1547,21 +2074,9 @@ async def get_app_profile( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_app_profile, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_app_profile + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1569,6 +2084,9 @@ async def get_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. 
+ self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1589,10 +2107,37 @@ async def list_app_profiles( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAppProfilesAsyncPager: r"""Lists information about app profiles in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_app_profiles(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAppProfilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_app_profiles(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]]): The request object. Request message for @@ -1608,11 +2153,13 @@ async def list_app_profiles( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager: @@ -1624,16 +2171,22 @@ async def list_app_profiles( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.ListAppProfilesRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): + request = bigtable_instance_admin.ListAppProfilesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
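`list_app_profiles` returns an async pager rather than a raw response; the next hunk also starts forwarding `retry` and `timeout` into the pager so that automatic fetches of subsequent pages honor the same settings as the first call. A sketch of consuming it (resource names are placeholders, and a real run needs application default credentials):

```python
import asyncio
from google.cloud import bigtable_admin_v2

async def main():
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()

    pager = await client.list_app_profiles(
        parent="projects/my-project/instances/my-instance"
    )
    # Iteration transparently issues follow-up ListAppProfiles calls as needed.
    async for app_profile in pager:
        print(app_profile.name)

asyncio.run(main())
```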
@@ -1642,21 +2195,9 @@ async def list_app_profiles( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_app_profiles, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_app_profiles + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1664,6 +2205,9 @@ async def list_app_profiles( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1678,6 +2222,8 @@ async def list_app_profiles( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -1694,10 +2240,43 @@ async def update_app_profile( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates an app profile within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.UpdateAppProfileRequest( + app_profile=app_profile, + ) + + # Make the request + operation = client.update_app_profile(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]]): The request object. Request message for @@ -1717,11 +2296,13 @@ async def update_app_profile( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation_async.AsyncOperation: @@ -1732,16 +2313,22 @@ async def update_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([app_profile, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [app_profile, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.UpdateAppProfileRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): + request = bigtable_instance_admin.UpdateAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1752,21 +2339,9 @@ async def update_app_profile( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_app_profile, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_app_profile + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1776,6 +2351,9 @@ async def update_app_profile( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1802,12 +2380,37 @@ async def delete_app_profile( ] = None, *, name: Optional[str] = None, + ignore_warnings: Optional[bool] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an app profile from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAppProfileRequest( + name="name_value", + ignore_warnings=True, + ) + + # Make the request + await client.delete_app_profile(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]]): The request object. 
Request message for @@ -1820,36 +2423,51 @@ async def delete_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + ignore_warnings (:class:`bool`): + Required. If true, ignore safety + checks when deleting the app profile. + + This corresponds to the ``ignore_warnings`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, ignore_warnings] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.DeleteAppProfileRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): + request = bigtable_instance_admin.DeleteAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name + if ignore_warnings is not None: + request.ignore_warnings = ignore_warnings # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_app_profile, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_app_profile + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1857,6 +2475,9 @@ async def delete_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1872,12 +2493,39 @@ async def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.get_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): The request object. Request message for ``GetIamPolicy`` method. @@ -1890,11 +2538,13 @@ async def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -1915,57 +2565,46 @@ async def get_iam_policy( constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, - ) + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_iam_policy, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_iam_policy + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1973,6 +2612,9 @@ async def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1991,11 +2633,38 @@ async def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): The request object. Request message for ``SetIamPolicy`` method. @@ -2008,11 +2677,13 @@ async def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2033,47 +2704,46 @@ async def set_iam_policy( constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - ) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_iam_policy, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.set_iam_policy + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2081,6 +2751,9 @@ async def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2100,11 +2773,39 @@ async def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): The request object. Request message for ``TestIamPermissions`` method. @@ -2126,53 +2827,45 @@ async def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
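+ # (resource and permissions are the only flattened fields for
+ # TestIamPermissions; supplying either of them alongside `request`
+ # trips the ValueError below.)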
+ flattened_params = [resource, permissions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, + resource=resource, permissions=permissions ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.test_iam_permissions, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.test_iam_permissions + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2180,6 +2873,9 @@ async def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2200,11 +2896,38 @@ async def list_hot_tablets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListHotTabletsAsyncPager: r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_hot_tablets(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListHotTabletsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hot_tablets(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]]): The request object. Request message for @@ -2217,11 +2940,13 @@ async def list_hot_tablets( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager: @@ -2233,16 +2958,22 @@ async def list_hot_tablets( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_instance_admin.ListHotTabletsRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): + request = bigtable_instance_admin.ListHotTabletsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -2251,21 +2982,9 @@ async def list_hot_tablets( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_hot_tablets, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_hot_tablets + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2273,6 +2992,9 @@ async def list_hot_tablets( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2287,22 +3009,1332 @@ async def list_hot_tablets( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) # Done; return the response. 
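+ # (retry and timeout are now forwarded into the pager above, so the
+ # RPCs it makes for subsequent pages reuse the caller's settings.)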
return response - async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=package_version.__version__ + async def create_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.CreateLogicalViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + logical_view: Optional[instance.LogicalView] = None, + logical_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a logical view within an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + logical_view=logical_view, + ) + + # Make the request + operation = client.create_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.CreateLogicalView. + parent (:class:`str`): + Required. The parent instance where this logical view + will be created. Format: + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logical_view (:class:`google.cloud.bigtable_admin_v2.types.LogicalView`): + Required. The logical view to create. + This corresponds to the ``logical_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logical_view_id (:class:`str`): + Required. The ID to use for the + logical view, which will become the + final component of the logical view's + resource name. + + This corresponds to the ``logical_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.LogicalView` + A SQL logical view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, logical_view, logical_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateLogicalViewRequest): + request = bigtable_instance_admin.CreateLogicalViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if logical_view is not None: + request.logical_view = logical_view + if logical_view_id is not None: + request.logical_view_id = logical_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_logical_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.LogicalView, + metadata_type=bigtable_instance_admin.CreateLogicalViewMetadata, + ) + + # Done; return the response. + return response + + async def get_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.GetLogicalViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.LogicalView: + r"""Gets information about a logical view. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetLogicalViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_logical_view(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]]): + The request object. 
Request message for + BigtableInstanceAdmin.GetLogicalView. + name (:class:`str`): + Required. The unique name of the requested logical view. + Values are of the form + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.LogicalView: + A SQL logical view object that can be + referenced in SQL queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetLogicalViewRequest): + request = bigtable_instance_admin.GetLogicalViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_logical_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_logical_views( + self, + request: Optional[ + Union[bigtable_instance_admin.ListLogicalViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListLogicalViewsAsyncPager: + r"""Lists information about logical views in an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_logical_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListLogicalViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_logical_views(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.ListLogicalViews. + parent (:class:`str`): + Required. The unique name of the instance for which the + list of logical views is requested. Values are of the + form ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsAsyncPager: + Response message for + BigtableInstanceAdmin.ListLogicalViews. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListLogicalViewsRequest): + request = bigtable_instance_admin.ListLogicalViewsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_logical_views + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
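+ # (Iterating the pager lazily issues further ListLogicalViews RPCs as
+ # pages are exhausted; see the `async for` loop in the sample above.)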
+ response = pagers.ListLogicalViewsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.UpdateLogicalViewRequest, dict] + ] = None, + *, + logical_view: Optional[instance.LogicalView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a logical view within an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.UpdateLogicalViewRequest( + logical_view=logical_view, + ) + + # Make the request + operation = client.update_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.UpdateLogicalView. + logical_view (:class:`google.cloud.bigtable_admin_v2.types.LogicalView`): + Required. The logical view to update. + + The logical view's ``name`` field is used to identify + the view to update. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``logical_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.LogicalView` + A SQL logical view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
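+ # (logical_view carries the new field values and update_mask selects
+ # which of them are written; both count as flattened parameters here.)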
+ flattened_params = [logical_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.UpdateLogicalViewRequest): + request = bigtable_instance_admin.UpdateLogicalViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if logical_view is not None: + request.logical_view = logical_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_logical_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("logical_view.name", request.logical_view.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.LogicalView, + metadata_type=bigtable_instance_admin.UpdateLogicalViewMetadata, + ) + + # Done; return the response. + return response + + async def delete_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.DeleteLogicalViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a logical view from an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteLogicalViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_logical_view(request=request) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.DeleteLogicalView. + name (:class:`str`): + Required. The unique name of the logical view to be + deleted. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteLogicalViewRequest): + request = bigtable_instance_admin.DeleteLogicalViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_logical_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.CreateMaterializedViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + materialized_view: Optional[instance.MaterializedView] = None, + materialized_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a materialized view within an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + materialized_view=materialized_view, + ) + + # Make the request + operation = client.create_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.CreateMaterializedView. + parent (:class:`str`): + Required. The parent instance where this materialized + view will be created. Format: + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + materialized_view (:class:`google.cloud.bigtable_admin_v2.types.MaterializedView`): + Required. The materialized view to + create. + + This corresponds to the ``materialized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + materialized_view_id (:class:`str`): + Required. The ID to use for the + materialized view, which will become the + final component of the materialized + view's resource name. + + This corresponds to the ``materialized_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` + A materialized view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, materialized_view, materialized_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
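+ # (When the caller already passes a CreateMaterializedViewRequest proto,
+ # it is now used as-is; only dicts and other inputs are coerced into a
+ # new request object.)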
+ if not isinstance( + request, bigtable_instance_admin.CreateMaterializedViewRequest + ): + request = bigtable_instance_admin.CreateMaterializedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if materialized_view is not None: + request.materialized_view = materialized_view + if materialized_view_id is not None: + request.materialized_view_id = materialized_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_materialized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.MaterializedView, + metadata_type=bigtable_instance_admin.CreateMaterializedViewMetadata, + ) + + # Done; return the response. + return response + + async def get_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.GetMaterializedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.MaterializedView: + r"""Gets information about a materialized view. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetMaterializedViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_materialized_view(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.GetMaterializedView. + name (:class:`str`): + Required. The unique name of the requested materialized + view. Values are of the form + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.MaterializedView: + A materialized view object that can + be referenced in SQL queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetMaterializedViewRequest): + request = bigtable_instance_admin.GetMaterializedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_materialized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_materialized_views( + self, + request: Optional[ + Union[bigtable_instance_admin.ListMaterializedViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListMaterializedViewsAsyncPager: + r"""Lists information about materialized views in an + instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_materialized_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListMaterializedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_materialized_views(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.ListMaterializedViews. + parent (:class:`str`): + Required. The unique name of the instance for which the + list of materialized views is requested. 
Values are of + the form ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsAsyncPager: + Response message for + BigtableInstanceAdmin.ListMaterializedViews. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.ListMaterializedViewsRequest + ): + request = bigtable_instance_admin.ListMaterializedViewsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_materialized_views + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMaterializedViewsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.UpdateMaterializedViewRequest, dict] + ] = None, + *, + materialized_view: Optional[instance.MaterializedView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a materialized view within an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.UpdateMaterializedViewRequest( + materialized_view=materialized_view, + ) + + # Make the request + operation = client.update_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.UpdateMaterializedView. + materialized_view (:class:`google.cloud.bigtable_admin_v2.types.MaterializedView`): + Required. The materialized view to update. + + The materialized view's ``name`` field is used to + identify the view to update. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``materialized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` + A materialized view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [materialized_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.UpdateMaterializedViewRequest + ): + request = bigtable_instance_admin.UpdateMaterializedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
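        # For example (illustrative mask), a caller typically pairs the view
        # with a FieldMask restricting which fields are written:
        #
        #   mask = field_mask_pb2.FieldMask(paths=["deletion_protection"])
        #   operation = await client.update_materialized_view(
        #       materialized_view=mv, update_mask=mask
        #   )
        #   result = await operation.result()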
+ if materialized_view is not None: + request.materialized_view = materialized_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_materialized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("materialized_view.name", request.materialized_view.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.MaterializedView, + metadata_type=bigtable_instance_admin.UpdateMaterializedViewMetadata, + ) + + # Done; return the response. + return response + + async def delete_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.DeleteMaterializedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a materialized view from an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteMaterializedViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_materialized_view(request=request) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.DeleteMaterializedView. + name (:class:`str`): + Required. The unique name of the materialized view to be + deleted. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.DeleteMaterializedViewRequest + ): + request = bigtable_instance_admin.DeleteMaterializedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_materialized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("BigtableInstanceAdminAsyncClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 52c61ea4f..9d64108bb 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,10 +14,14 @@ # limitations under the License. 
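The async client above is also an async context manager: `__aenter__` returns the client and `__aexit__` closes the transport. A minimal usage sketch, assuming application-default credentials and an existing instance (`my-project` and `my-instance` are illustrative):

```python
import asyncio

from google.cloud import bigtable_admin_v2


async def main():
    # Leaving the `async with` block closes the underlying transport.
    async with bigtable_admin_v2.BigtableInstanceAdminAsyncClient() as client:
        parent = "projects/my-project/instances/my-instance"
        # Paged RPCs return an async pager; iterating it resolves further pages.
        pager = await client.list_materialized_views(parent=parent)
        async for view in pager:
            print(view.name)


asyncio.run(main())
```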
# from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging import os import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -28,6 +32,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_admin_v2 import gapic_version as package_version @@ -40,11 +45,21 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -137,11 +152,43 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -300,6 +347,50 @@ def parse_instance_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) return m.groupdict() if m else {} + @staticmethod + def logical_view_path( + project: str, + instance: str, + logical_view: str, + ) -> str: + """Returns a fully-qualified logical_view string.""" + return "projects/{project}/instances/{instance}/logicalViews/{logical_view}".format( + project=project, + instance=instance, + logical_view=logical_view, + ) + + @staticmethod + def parse_logical_view_path(path: str) -> Dict[str, str]: + """Parses a logical_view path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/logicalViews/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def materialized_view_path( + project: str, + instance: str, + materialized_view: str, + ) -> str: + """Returns a fully-qualified materialized_view string.""" + return "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( + project=project, + instance=instance, + materialized_view=materialized_view, + ) + + @staticmethod + def parse_materialized_view_path(path: str) -> Dict[str, str]: + """Parses a materialized_view path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/materializedViews/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def table_path( project: str, @@ -403,7 +494,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -433,14 +524,15 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = BigtableInstanceAdminClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -448,7 +540,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -466,11 +558,178 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = BigtableInstanceAdminClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert, use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. 
+ """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ( + BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True + + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. 
+ """ + return self._universe_domain + def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableInstanceAdminTransport]] = None, + transport: Optional[ + Union[ + str, + BigtableInstanceAdminTransport, + Callable[..., BigtableInstanceAdminTransport], + ] + ] = None, client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -482,25 +741,37 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, BigtableInstanceAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + transport (Optional[Union[str,BigtableInstanceAdminTransport,Callable[..., BigtableInstanceAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableInstanceAdminTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. 
@@ -511,17 +782,38 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BigtableInstanceAdminClient._read_environment_variables() + self._client_cert_source = BigtableInstanceAdminClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = BigtableInstanceAdminClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() - api_key_value = getattr(client_options, "api_key", None) + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -530,20 +822,33 @@ def __init__( # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. - if isinstance(transport, BigtableInstanceAdminTransport): + transport_provided = isinstance(transport, BigtableInstanceAdminTransport) + if transport_provided: # transport is a BigtableInstanceAdminTransport instance. - if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." 
) - self._transport = transport - else: + self._transport = cast(BigtableInstanceAdminTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or BigtableInstanceAdminClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -553,19 +858,50 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) - self._transport = Transport( + transport_init: Union[ + Type[BigtableInstanceAdminTransport], + Callable[..., BigtableInstanceAdminTransport], + ] = ( + BigtableInstanceAdminClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., BigtableInstanceAdminTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableInstanceAdminClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "credentialsType": None, + }, + ) + def create_instance( self, request: Optional[ @@ -578,7 +914,7 @@ def create_instance( clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Create an instance within a project. @@ -588,6 +924,41 @@ def create_instance( scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + instance=instance, + ) + + # Make the request + operation = client.create_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): The request object. Request message for @@ -621,7 +992,6 @@ def create_instance( ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. This corresponds to the ``clusters`` field on the ``request`` instance; if ``request`` is provided, this @@ -629,8 +999,10 @@ def create_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -644,19 +1016,20 @@ def create_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, instance_id, instance, clusters]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, instance_id, instance, clusters] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): request = bigtable_instance_admin.CreateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the @@ -680,6 +1053,9 @@ def create_instance( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
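        # At this point `metadata` ends with the routing header, roughly
        # ("x-goog-request-params", "parent=projects%2Fmy-project") with the
        # value URL-encoded, letting the frontend route the request without
        # parsing the payload.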
response = rpc( request, @@ -708,10 +1084,36 @@ def get_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Gets information about an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetInstanceRequest( + name="name_value", + ) + + # Make the request + response = client.get_instance(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]): The request object. Request message for @@ -727,8 +1129,10 @@ def get_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -740,19 +1144,20 @@ def get_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): request = bigtable_instance_admin.GetInstanceRequest(request) # If we have keyword arguments corresponding to fields on the @@ -770,6 +1175,9 @@ def get_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
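        # `retry`/`timeout` left at gapic_v1.method.DEFAULT resolve to the
        # per-method defaults baked into `_wrapped_methods`; callers may
        # override them, e.g. (sketch):
        #   client.get_instance(
        #       name=name, timeout=30.0, retry=retries.Retry(initial=0.1, maximum=5.0)
        #   )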
response = rpc( request, @@ -790,10 +1198,36 @@ def list_instances( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_instances(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListInstancesRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_instances(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]): The request object. Request message for @@ -809,8 +1243,10 @@ def list_instances( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListInstancesResponse: @@ -819,19 +1255,20 @@ def list_instances( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListInstancesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): request = bigtable_instance_admin.ListInstancesRequest(request) # If we have keyword arguments corresponding to fields on the @@ -849,6 +1286,9 @@ def list_instances( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
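        # Note that this RPC returns a plain ListInstancesResponse rather than
        # a pager; callers iterate `response.instances`, follow
        # `response.next_page_token`, and check `response.failed_locations`
        # themselves.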
response = rpc( request, @@ -866,13 +1306,39 @@ def update_instance( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method updates only the display name and type for an Instance. To update other Instance properties, such as labels, use PartialUpdateInstance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Instance( + display_name="display_name_value", + ) + + # Make the request + response = client.update_instance(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]): The request object. A collection of Bigtable @@ -884,8 +1350,10 @@ def update_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -897,10 +1365,8 @@ def update_instance( """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a instance.Instance. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, instance.Instance): request = instance.Instance(request) @@ -914,6 +1380,9 @@ def update_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -935,12 +1404,45 @@ def partial_update_instance( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Partially updates an instance within a project. This method can modify all fields of an Instance and is the preferred way to update an Instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_partial_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.PartialUpdateInstanceRequest( + instance=instance, + ) + + # Make the request + operation = client.partial_update_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]): The request object. Request message for @@ -963,8 +1465,10 @@ def partial_update_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -978,19 +1482,20 @@ def partial_update_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([instance, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [instance, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.PartialUpdateInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance( request, bigtable_instance_admin.PartialUpdateInstanceRequest ): @@ -1014,6 +1519,9 @@ def partial_update_instance( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1042,10 +1550,33 @@ def delete_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Delete an instance from a project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteInstanceRequest( + name="name_value", + ) + + # Make the request + client.delete_instance(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]): The request object. Request message for @@ -1061,23 +1592,26 @@ def delete_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): request = bigtable_instance_admin.DeleteInstanceRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1095,6 +1629,9 @@ def delete_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1114,7 +1651,7 @@ def create_cluster( cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a cluster within an instance. @@ -1124,6 +1661,37 @@ def create_cluster( scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): The request object. Request message for @@ -1155,8 +1723,10 @@ def create_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1169,19 +1739,20 @@ def create_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, cluster_id, cluster]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, cluster_id, cluster] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): request = bigtable_instance_admin.CreateClusterRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1203,6 +1774,9 @@ def create_cluster( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1231,10 +1805,36 @@ def get_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
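Editorial note: ``create_cluster`` above returns a long-running operation. A sketch of bounding the wait on its result, with illustrative names and a hypothetical 300-second deadline:

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    operation = client.create_cluster(
        parent="projects/my-project/instances/my-instance",
        cluster_id="my-cluster",
        cluster=bigtable_admin_v2.Cluster(
            location="projects/my-project/locations/us-central1-b",
            serve_nodes=3,
        ),
    )
    # result() blocks until the operation finishes or the timeout lapses,
    # raising concurrent.futures.TimeoutError in the latter case.
    cluster = operation.result(timeout=300)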
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = client.get_cluster(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]): The request object. Request message for @@ -1250,8 +1850,10 @@ def get_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Cluster: @@ -1262,19 +1864,20 @@ def get_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.GetClusterRequest): request = bigtable_instance_admin.GetClusterRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1292,6 +1895,9 @@ def get_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1312,10 +1918,36 @@ def list_clusters( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
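Editorial note: every method here accepts per-call ``retry`` and ``timeout`` overrides. A sketch against ``get_cluster`` above, with illustrative backoff values:

.. code-block:: python

    from google.api_core import exceptions, retry
    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Retry only on UNAVAILABLE, with exponential backoff capped at 10s
    # and an overall 60s retry budget.
    custom_retry = retry.Retry(
        predicate=retry.if_exception_type(exceptions.ServiceUnavailable),
        initial=0.5,
        maximum=10.0,
        multiplier=2.0,
        timeout=60.0,
    )
    cluster = client.get_cluster(
        name="projects/my-project/instances/my-instance/clusters/my-cluster",
        retry=custom_retry,
        timeout=30.0,
    )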
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_clusters(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_clusters(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]): The request object. Request message for @@ -1333,8 +1965,10 @@ def list_clusters( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListClustersResponse: @@ -1343,19 +1977,20 @@ def list_clusters( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListClustersRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.ListClustersRequest): request = bigtable_instance_admin.ListClustersRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1373,6 +2008,9 @@ def list_clusters( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1390,7 +2028,7 @@ def update_cluster( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates a cluster within an instance. @@ -1398,6 +2036,35 @@ def update_cluster( cluster_config.cluster_autoscaling_config. In order to update it, you must use PartialUpdateCluster. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
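Editorial note: unlike the pager-based listings later in this file, ``list_clusters`` above returns a single response. A sketch of reading it, including the partial-failure field (names illustrative):

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    response = client.list_clusters(
        parent="projects/my-project/instances/my-instance"
    )
    for cluster in response.clusters:
        print(cluster.name, cluster.state)
    # Locations that could not be reached are reported, not raised.
    if response.failed_locations:
        print("unreachable:", list(response.failed_locations))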
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Cluster( + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): The request object. A resizable group of nodes in a particular cloud @@ -1407,8 +2074,10 @@ def update_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1421,10 +2090,8 @@ def update_cluster( """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a instance.Cluster. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, instance.Cluster): request = instance.Cluster(request) @@ -1438,6 +2105,9 @@ def update_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1467,7 +2137,7 @@ def partial_update_cluster( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Partially updates a cluster within a project. This method is the preferred way to update a Cluster. @@ -1484,6 +2154,35 @@ def partial_update_cluster( cluster_config.cluster_autoscaling_config, and explicitly set a serve_node count via the update_mask. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_partial_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.PartialUpdateClusterRequest( + ) + + # Make the request + operation = client.partial_update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]): The request object. Request message for @@ -1505,8 +2204,10 @@ def partial_update_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1519,19 +2220,20 @@ def partial_update_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([cluster, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [cluster, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.PartialUpdateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): request = bigtable_instance_admin.PartialUpdateClusterRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1553,6 +2255,9 @@ def partial_update_cluster( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1581,10 +2286,33 @@ def delete_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a cluster from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
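Editorial note: per the ``update_cluster``/``partial_update_cluster`` docstrings above, autoscaling can only be changed through ``partial_update_cluster``. A sketch under that reading, with illustrative limits:

.. code-block:: python

    from google.cloud import bigtable_admin_v2
    from google.protobuf import field_mask_pb2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    cluster = bigtable_admin_v2.Cluster(
        name="projects/my-project/instances/my-instance/clusters/my-cluster",
    )
    cluster.cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes = 1
    cluster.cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes = 5
    cluster.cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent = 60

    operation = client.partial_update_cluster(
        cluster=cluster,
        update_mask=field_mask_pb2.FieldMask(
            paths=["cluster_config.cluster_autoscaling_config"]
        ),
    )
    print(operation.result())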
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + client.delete_cluster(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]): The request object. Request message for @@ -1600,23 +2328,26 @@ def delete_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): request = bigtable_instance_admin.DeleteClusterRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1634,6 +2365,9 @@ def delete_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1653,10 +2387,41 @@ def create_app_profile( app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=app_profile, + ) + + # Make the request + response = client.create_app_profile(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]): The request object. Request message for @@ -1688,8 +2453,10 @@ def create_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1699,19 +2466,20 @@ def create_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, app_profile_id, app_profile]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, app_profile_id, app_profile] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): request = bigtable_instance_admin.CreateAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1733,6 +2501,9 @@ def create_app_profile( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1753,16 +2524,42 @@ def get_app_profile( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): - The request object. 
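Editorial note: the generated ``create_app_profile`` sample above sets only ``priority``. A sketch of the other common shape, single-cluster routing (cluster and profile ids are illustrative):

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    app_profile = bigtable_admin_v2.AppProfile(
        single_cluster_routing=bigtable_admin_v2.AppProfile.SingleClusterRouting(
            cluster_id="my-cluster",
            allow_transactional_writes=True,
        ),
    )
    response = client.create_app_profile(
        parent="projects/my-project/instances/my-instance",
        app_profile_id="my-profile",
        app_profile=app_profile,
    )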
Request message for - BigtableInstanceAdmin.GetAppProfile. - name (str): - Required. The unique name of the requested app profile. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAppProfileRequest( + name="name_value", + ) + + # Make the request + response = client.get_app_profile(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + name (str): + Required. The unique name of the requested app profile. Values are of the form ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. @@ -1772,8 +2569,10 @@ def get_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1783,19 +2582,20 @@ def get_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): request = bigtable_instance_admin.GetAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1813,6 +2613,9 @@ def get_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, @@ -1833,10 +2636,37 @@ def list_app_profiles( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAppProfilesPager: r"""Lists information about app profiles in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_app_profiles(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAppProfilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_app_profiles(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]): The request object. Request message for @@ -1855,8 +2685,10 @@ def list_app_profiles( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager: @@ -1868,19 +2700,20 @@ def list_app_profiles( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListAppProfilesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): request = bigtable_instance_admin.ListAppProfilesRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1898,6 +2731,9 @@ def list_app_profiles( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, @@ -1912,6 +2748,8 @@ def list_app_profiles( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -1928,10 +2766,43 @@ def update_app_profile( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates an app profile within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.UpdateAppProfileRequest( + app_profile=app_profile, + ) + + # Make the request + operation = client.update_app_profile(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]): The request object. Request message for @@ -1954,8 +2825,10 @@ def update_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1966,19 +2839,20 @@ def update_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([app_profile, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [app_profile, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.UpdateAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
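Editorial note: the ``ListAppProfilesPager`` constructed above now forwards ``retry`` and ``timeout`` to each page fetch. A sketch of page-wise consumption (parent is illustrative):

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    pager = client.list_app_profiles(
        parent="projects/my-project/instances/my-instance"
    )
    # Each iteration of .pages issues one ListAppProfiles RPC, reusing
    # the retry/timeout captured when the pager was built.
    for page in pager.pages:
        for app_profile in page.app_profiles:
            print(app_profile.name)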
if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): request = bigtable_instance_admin.UpdateAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2000,6 +2874,9 @@ def update_app_profile( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2026,12 +2903,37 @@ def delete_app_profile( ] = None, *, name: Optional[str] = None, + ignore_warnings: Optional[bool] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an app profile from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAppProfileRequest( + name="name_value", + ignore_warnings=True, + ) + + # Make the request + client.delete_app_profile(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]): The request object. Request message for @@ -2044,32 +2946,44 @@ def delete_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + ignore_warnings (bool): + Required. If true, ignore safety + checks when deleting the app profile. + + This corresponds to the ``ignore_warnings`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, ignore_warnings] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
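Editorial note: ``delete_app_profile`` below gains a flattened ``ignore_warnings`` argument. A sketch of the call (resource name illustrative):

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # `ignore_warnings` may be combined with `name`, but neither may be
    # combined with a `request` object (see the guard above).
    client.delete_app_profile(
        name="projects/my-project/instances/my-instance/appProfiles/my-profile",
        ignore_warnings=True,
    )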
+ # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): request = bigtable_instance_admin.DeleteAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name + if ignore_warnings is not None: + request.ignore_warnings = ignore_warnings # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2081,6 +2995,9 @@ def delete_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -2096,12 +3013,39 @@ def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.get_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): The request object. Request message for ``GetIamPolicy`` method. @@ -2117,8 +3061,10 @@ def get_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2139,25 +3085,28 @@ def get_iam_policy( constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2165,8 +3114,8 @@ def get_iam_policy( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: # Null request, just make one. @@ -2184,6 +3133,9 @@ def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
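Editorial note: the conditional bindings shown in the JSON/YAML examples above are only returned in full when policy version 3 is requested. A sketch (resource name illustrative):

.. code-block:: python

    from google.cloud import bigtable_admin_v2
    from google.iam.v1 import iam_policy_pb2, options_pb2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    policy = client.get_iam_policy(
        request=iam_policy_pb2.GetIamPolicyRequest(
            resource="projects/my-project/instances/my-instance",
            options=options_pb2.GetPolicyOptions(requested_policy_version=3),
        )
    )
    for binding in policy.bindings:
        print(binding.role, list(binding.members))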
response = rpc( request, @@ -2202,11 +3154,38 @@ def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): The request object. Request message for ``SetIamPolicy`` method. @@ -2222,8 +3201,10 @@ def set_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2244,25 +3225,28 @@ def set_iam_policy( constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2270,8 +3254,8 @@ def set_iam_policy( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: # Null request, just make one. @@ -2289,6 +3273,9 @@ def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
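Editorial note: since ``set_iam_policy`` above replaces any existing policy, the usual pattern is read-modify-write; reusing the ``etag`` returned by ``get_iam_policy`` makes a concurrent modification fail loudly instead of being overwritten. A sketch with an illustrative role and member:

.. code-block:: python

    from google.cloud import bigtable_admin_v2
    from google.iam.v1 import iam_policy_pb2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    resource = "projects/my-project/instances/my-instance"

    policy = client.get_iam_policy(
        request=iam_policy_pb2.GetIamPolicyRequest(resource=resource)
    )
    # The fetched policy keeps its etag, so this write is conditional.
    policy.bindings.add(
        role="roles/bigtable.user",
        members=["user:eve@example.com"],
    )
    client.set_iam_policy(
        request=iam_policy_pb2.SetIamPolicyRequest(
            resource=resource, policy=policy
        )
    )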
response = rpc( request, @@ -2308,11 +3295,39 @@ def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): The request object. Request message for ``TestIamPermissions`` method. @@ -2337,17 +3352,22 @@ def test_iam_permissions( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource, permissions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2355,8 +3375,8 @@ def test_iam_permissions( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: # Null request, just make one. @@ -2376,6 +3396,9 @@ def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
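Editorial note: ``test_iam_permissions`` above echoes back only the subset of permissions the caller actually holds, so comparing against the request shows what is missing. A sketch with illustrative permission names:

.. code-block:: python

    from google.cloud import bigtable_admin_v2
    from google.iam.v1 import iam_policy_pb2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    wanted = ["bigtable.instances.get", "bigtable.instances.update"]
    response = client.test_iam_permissions(
        request=iam_policy_pb2.TestIamPermissionsRequest(
            resource="projects/my-project/instances/my-instance",
            permissions=wanted,
        )
    )
    missing = set(wanted) - set(response.permissions)
    print("missing:", missing or "none")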
response = rpc( request, @@ -2396,11 +3419,38 @@ def list_hot_tablets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListHotTabletsPager: r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_hot_tablets(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListHotTabletsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hot_tablets(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]): The request object. Request message for @@ -2416,8 +3466,10 @@ def list_hot_tablets( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager: @@ -2429,19 +3481,20 @@ def list_hot_tablets( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListHotTabletsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): request = bigtable_instance_admin.ListHotTabletsRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2459,6 +3512,9 @@ def list_hot_tablets( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. 
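Editorial note: ``list_hot_tablets`` above accepts a time window on the request. A sketch scanning the last hour, assuming proto-plus converts the ``datetime`` values to protobuf timestamps (cluster name illustrative):

.. code-block:: python

    import datetime

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    end = datetime.datetime.now(datetime.timezone.utc)
    request = bigtable_admin_v2.ListHotTabletsRequest(
        parent="projects/my-project/instances/my-instance/clusters/my-cluster",
        start_time=end - datetime.timedelta(hours=1),
        end_time=end,
    )
    # Results are ordered by CPU usage; the pager handles page tokens.
    for hot_tablet in client.list_hot_tablets(request=request):
        print(hot_tablet.table_name, hot_tablet.node_cpu_usage_percent)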
+ self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2473,12 +3529,1289 @@ def list_hot_tablets( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.CreateLogicalViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + logical_view: Optional[instance.LogicalView] = None, + logical_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a logical view within an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + logical_view=logical_view, + ) + + # Make the request + operation = client.create_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.CreateLogicalView. + parent (str): + Required. The parent instance where this logical view + will be created. Format: + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): + Required. The logical view to create. + This corresponds to the ``logical_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logical_view_id (str): + Required. The ID to use for the + logical view, which will become the + final component of the logical view's + resource name. + + This corresponds to the ``logical_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.LogicalView` + A SQL logical view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, logical_view, logical_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateLogicalViewRequest): + request = bigtable_instance_admin.CreateLogicalViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if logical_view is not None: + request.logical_view = logical_view + if logical_view_id is not None: + request.logical_view_id = logical_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_logical_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.LogicalView, + metadata_type=bigtable_instance_admin.CreateLogicalViewMetadata, + ) + + # Done; return the response. + return response + + def get_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.GetLogicalViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.LogicalView: + r"""Gets information about a logical view. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetLogicalViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_logical_view(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.GetLogicalView. + name (str): + Required. 
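For the new `create_logical_view` long-running operation, a hedged usage sketch (assumes application default credentials and an existing project and instance; all resource names and the query are placeholders):

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    operation = client.create_logical_view(
        parent="projects/my-project/instances/my-instance",
        logical_view=bigtable_admin_v2.LogicalView(query="SELECT ..."),
        logical_view_id="my-view",
    )
    print(operation.done())    # typically False right after the call returns
    view = operation.result()  # blocks until the backend finishes the create
    print(view.name)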
The unique name of the requested logical view. + Values are of the form + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.LogicalView: + A SQL logical view object that can be + referenced in SQL queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetLogicalViewRequest): + request = bigtable_instance_admin.GetLogicalViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_logical_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, metadata=metadata, ) # Done; return the response. return response + def list_logical_views( + self, + request: Optional[ + Union[bigtable_instance_admin.ListLogicalViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListLogicalViewsPager: + r"""Lists information about logical views in an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_logical_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListLogicalViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_logical_views(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListLogicalViews. + parent (str): + Required. The unique name of the instance for which the + list of logical views is requested. Values are of the + form ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsPager: + Response message for + BigtableInstanceAdmin.ListLogicalViews. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListLogicalViewsRequest): + request = bigtable_instance_admin.ListLogicalViewsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_logical_views] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
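The pager wrapping referenced above follows a simple protocol: re-issue the request with each `next_page_token`, forwarding the same retry/timeout/metadata, until the token is empty. A self-contained sketch with stand-in types (not the generated classes):

.. code-block:: python

    from dataclasses import dataclass
    from typing import Callable, Iterator, List

    @dataclass
    class FakeResponse:
        logical_views: List[str]
        next_page_token: str = ""

    @dataclass
    class FakeRequest:
        page_token: str = ""

    class MiniPager:
        def __init__(self, method: Callable[[FakeRequest], FakeResponse],
                     request: FakeRequest, response: FakeResponse):
            self._method, self._request, self._response = method, request, response

        @property
        def pages(self) -> Iterator[FakeResponse]:
            # Yield the first page, then keep fetching while a token remains.
            yield self._response
            while self._response.next_page_token:
                self._request.page_token = self._response.next_page_token
                self._response = self._method(self._request)
                yield self._response

        def __iter__(self) -> Iterator[str]:
            for page in self.pages:
                yield from page.logical_views

    data = {"": FakeResponse(["lv1", "lv2"], "t1"), "t1": FakeResponse(["lv3"])}
    pager = MiniPager(lambda req: data[req.page_token], FakeRequest(), data[""])
    print(list(pager))  # ['lv1', 'lv2', 'lv3']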
+ response = pagers.ListLogicalViewsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.UpdateLogicalViewRequest, dict] + ] = None, + *, + logical_view: Optional[instance.LogicalView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Updates a logical view within an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.UpdateLogicalViewRequest( + logical_view=logical_view, + ) + + # Make the request + operation = client.update_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.UpdateLogicalView. + logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): + Required. The logical view to update. + + The logical view's ``name`` field is used to identify + the view to update. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``logical_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.LogicalView` + A SQL logical view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [logical_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.UpdateLogicalViewRequest): + request = bigtable_instance_admin.UpdateLogicalViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if logical_view is not None: + request.logical_view = logical_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_logical_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("logical_view.name", request.logical_view.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.LogicalView, + metadata_type=bigtable_instance_admin.UpdateLogicalViewMetadata, + ) + + # Done; return the response. + return response + + def delete_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.DeleteLogicalViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a logical view from an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteLogicalViewRequest( + name="name_value", + ) + + # Make the request + client.delete_logical_view(request=request) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.DeleteLogicalView. + name (str): + Required. The unique name of the logical view to be + deleted. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
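A hedged sketch of a partial update via `update_logical_view`: the `FieldMask` names which fields to overwrite, and the view's `name` field identifies the target. The resource name and mask path below are illustrative:

.. code-block:: python

    from google.cloud import bigtable_admin_v2
    from google.protobuf import field_mask_pb2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    logical_view = bigtable_admin_v2.LogicalView(
        name="projects/my-project/instances/my-instance/logicalViews/my-view",
        query="SELECT ...",
    )
    operation = client.update_logical_view(
        logical_view=logical_view,
        update_mask=field_mask_pb2.FieldMask(paths=["query"]),  # only `query`
    )
    print(operation.result())  # blocks until the long-running update completes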
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteLogicalViewRequest): + request = bigtable_instance_admin.DeleteLogicalViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_logical_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.CreateMaterializedViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + materialized_view: Optional[instance.MaterializedView] = None, + materialized_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a materialized view within an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + materialized_view=materialized_view, + ) + + # Make the request + operation = client.create_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]): + The request object. 
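The routing-header lines above are how per-request routing metadata is built; a small sketch of what `to_grpc_metadata` produces (the printed output shown is indicative, since values are URL-encoded):

.. code-block:: python

    from google.api_core import gapic_v1

    entry = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", "projects/my-project/instances/my-instance"),)
    )
    # Roughly: ('x-goog-request-params', 'parent=projects%2Fmy-project%2F...')
    print(entry)

    metadata = () + (entry,)  # appended after any caller-supplied metadata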
Request message for + BigtableInstanceAdmin.CreateMaterializedView. + parent (str): + Required. The parent instance where this materialized + view will be created. Format: + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): + Required. The materialized view to + create. + + This corresponds to the ``materialized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + materialized_view_id (str): + Required. The ID to use for the + materialized view, which will become the + final component of the materialized + view's resource name. + + This corresponds to the ``materialized_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` + A materialized view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, materialized_view, materialized_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.CreateMaterializedViewRequest + ): + request = bigtable_instance_admin.CreateMaterializedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if materialized_view is not None: + request.materialized_view = materialized_view + if materialized_view_id is not None: + request.materialized_view_id = materialized_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_materialized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
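Since `create_materialized_view` returns the wrapped operation future shown here, callers can bound the wait and surface backend failures explicitly. A hedged sketch (placeholder resources, illustrative timeout):

.. code-block:: python

    from google.api_core import exceptions
    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    op = client.create_materialized_view(
        parent="projects/my-project/instances/my-instance",
        materialized_view=bigtable_admin_v2.MaterializedView(query="SELECT ..."),
        materialized_view_id="my-view",
    )
    try:
        view = op.result(timeout=300)  # blocks; re-raises backend failures
        print(view.name)
    except exceptions.GoogleAPICallError as exc:
        print("create failed:", exc)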
+ response = operation.from_gapic( + response, + self._transport.operations_client, + instance.MaterializedView, + metadata_type=bigtable_instance_admin.CreateMaterializedViewMetadata, + ) + + # Done; return the response. + return response + + def get_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.GetMaterializedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.MaterializedView: + r"""Gets information about a materialized view. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetMaterializedViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_materialized_view(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.GetMaterializedView. + name (str): + Required. The unique name of the requested materialized + view. Values are of the form + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.MaterializedView: + A materialized view object that can + be referenced in SQL queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetMaterializedViewRequest): + request = bigtable_instance_admin.GetMaterializedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_materialized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_materialized_views( + self, + request: Optional[ + Union[bigtable_instance_admin.ListMaterializedViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListMaterializedViewsPager: + r"""Lists information about materialized views in an + instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_materialized_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListMaterializedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_materialized_views(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListMaterializedViews. + parent (str): + Required. The unique name of the instance for which the + list of materialized views is requested. Values are of + the form ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsPager: + Response message for + BigtableInstanceAdmin.ListMaterializedViews. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
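All of these methods accept `retry` and `timeout` overrides in place of the `gapic_v1.method.DEFAULT` sentinels; a hedged sketch of supplying a custom policy to `get_materialized_view` (backoff values and resource names are illustrative):

.. code-block:: python

    from google.api_core import exceptions, retry as retries
    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    custom_retry = retries.Retry(
        predicate=retries.if_exception_type(exceptions.ServiceUnavailable),
        initial=0.5,     # first backoff, in seconds
        maximum=8.0,     # cap on the backoff interval
        multiplier=2.0,  # exponential growth factor
        timeout=60.0,    # give up retrying after a minute
    )
    view = client.get_materialized_view(
        name="projects/my-project/instances/my-instance/materializedViews/my-view",
        retry=custom_retry,
        timeout=30.0,
    )
    print(view.query)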
+ flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.ListMaterializedViewsRequest + ): + request = bigtable_instance_admin.ListMaterializedViewsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_materialized_views] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMaterializedViewsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.UpdateMaterializedViewRequest, dict] + ] = None, + *, + materialized_view: Optional[instance.MaterializedView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Updates a materialized view within an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.UpdateMaterializedViewRequest( + materialized_view=materialized_view, + ) + + # Make the request + operation = client.update_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.UpdateMaterializedView. + materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): + Required. The materialized view to update. 
+ + The materialized view's ``name`` field is used to + identify the view to update. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``materialized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` + A materialized view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [materialized_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.UpdateMaterializedViewRequest + ): + request = bigtable_instance_admin.UpdateMaterializedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if materialized_view is not None: + request.materialized_view = materialized_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_materialized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("materialized_view.name", request.materialized_view.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.MaterializedView, + metadata_type=bigtable_instance_admin.UpdateMaterializedViewMetadata, + ) + + # Done; return the response. 
+ return response + + def delete_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.DeleteMaterializedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a materialized view from an instance. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteMaterializedViewRequest( + name="name_value", + ) + + # Make the request + client.delete_materialized_view(request=request) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.DeleteMaterializedView. + name (str): + Required. The unique name of the materialized view to be + deleted. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.DeleteMaterializedViewRequest + ): + request = bigtable_instance_admin.DeleteMaterializedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_materialized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + def __enter__(self) -> "BigtableInstanceAdminClient": return self @@ -2497,5 +4830,7 @@ def __exit__(self, type, value, traceback): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("BigtableInstanceAdminClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index 0d646a96e..ce5b67b27 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async from typing import ( Any, AsyncIterator, @@ -22,8 +25,18 @@ Tuple, Optional, Iterator, + Union, ) +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -52,7 +65,9 @@ def __init__( request: bigtable_instance_admin.ListAppProfilesRequest, response: bigtable_instance_admin.ListAppProfilesResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -63,12 +78,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
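The `hasattr` guard added at the bottom of the client module is plain feature detection: newer `api_core` `ClientInfo` objects expose `protobuf_runtime_version`, older ones do not, so the attribute is set only when present. A standalone sketch of the pattern (with a stand-in class):

.. code-block:: python

    import google.protobuf

    class FakeClientInfo:  # stand-in for google.api_core ClientInfo
        protobuf_runtime_version = None

    info = FakeClientInfo()
    if hasattr(info, "protobuf_runtime_version"):
        # Only record the runtime version when the field exists.
        info.protobuf_runtime_version = google.protobuf.__version__
    print(info.protobuf_runtime_version)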
""" self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -79,7 +101,12 @@ def pages(self) -> Iterator[bigtable_instance_admin.ListAppProfilesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[instance.AppProfile]: @@ -116,7 +143,9 @@ def __init__( request: bigtable_instance_admin.ListAppProfilesRequest, response: bigtable_instance_admin.ListAppProfilesResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -127,12 +156,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -145,7 +181,12 @@ async def pages( yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[instance.AppProfile]: @@ -184,7 +225,9 @@ def __init__( request: bigtable_instance_admin.ListHotTabletsRequest, response: bigtable_instance_admin.ListHotTabletsResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -195,12 +238,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -211,7 +261,12 @@ def pages(self) -> Iterator[bigtable_instance_admin.ListHotTabletsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[instance.HotTablet]: @@ -248,7 +303,9 @@ def __init__( request: bigtable_instance_admin.ListHotTabletsRequest, response: bigtable_instance_admin.ListHotTabletsResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -259,12 +316,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -277,7 +341,12 @@ async def pages( yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[instance.HotTablet]: @@ -290,3 +359,323 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListLogicalViewsPager: + """A pager for iterating through ``list_logical_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``logical_views`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListLogicalViews`` requests and continue to iterate + through the ``logical_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` + attributes are available on the pager. 
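The async pagers mirror the sync protocol with an async-generator `pages` property and a flattening `__aiter__`. A self-contained sketch with stand-in types (not the generated classes):

.. code-block:: python

    import asyncio
    from dataclasses import dataclass
    from typing import AsyncIterator, List

    @dataclass
    class Page:
        hot_tablets: List[str]
        next_page_token: str = ""

    async def fetch(token: str) -> Page:
        data = {"": Page(["t1"], "next"), "next": Page(["t2"])}
        return data[token]

    class MiniAsyncPager:
        def __init__(self, first: Page):
            self._response = first

        @property
        async def pages(self) -> AsyncIterator[Page]:
            # Async analogue of the sync pager loop above.
            yield self._response
            while self._response.next_page_token:
                self._response = await fetch(self._response.next_page_token)
                yield self._response

        def __aiter__(self) -> AsyncIterator[str]:
            async def gen():
                async for page in self.pages:
                    for item in page.hot_tablets:
                        yield item
            return gen()

    async def main():
        pager = MiniAsyncPager(await fetch(""))
        print([t async for t in pager])  # ['t1', 't2']

    asyncio.run(main())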
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_instance_admin.ListLogicalViewsResponse], + request: bigtable_instance_admin.ListLogicalViewsRequest, + response: bigtable_instance_admin.ListLogicalViewsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_instance_admin.ListLogicalViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_instance_admin.ListLogicalViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[instance.LogicalView]: + for page in self.pages: + yield from page.logical_views + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListLogicalViewsAsyncPager: + """A pager for iterating through ``list_logical_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``logical_views`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListLogicalViews`` requests and continue to iterate + through the ``logical_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_instance_admin.ListLogicalViewsResponse] + ], + request: bigtable_instance_admin.ListLogicalViewsRequest, + response: bigtable_instance_admin.ListLogicalViewsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
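One detail of these pager classes worth calling out: `__getattr__` forwards unknown attributes to the most recent response, so e.g. `pager.next_page_token` resolves against the last page fetched. A minimal sketch:

.. code-block:: python

    from typing import Any

    class Response:  # stand-in for a ListLogicalViewsResponse
        next_page_token = "abc"
        logical_views = ["v1"]

    class PassthroughPager:
        def __init__(self, response: Response):
            self._response = response

        def __getattr__(self, name: str) -> Any:
            # Called only when normal lookup fails on the pager itself.
            return getattr(self._response, name)

    pager = PassthroughPager(Response())
    print(pager.next_page_token)  # 'abc', read from the wrapped response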
+ request (google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_instance_admin.ListLogicalViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_instance_admin.ListLogicalViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[instance.LogicalView]: + async def async_generator(): + async for page in self.pages: + for response in page.logical_views: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMaterializedViewsPager: + """A pager for iterating through ``list_materialized_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``materialized_views`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMaterializedViews`` requests and continue to iterate + through the ``materialized_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_instance_admin.ListMaterializedViewsResponse], + request: bigtable_instance_admin.ListMaterializedViewsRequest, + response: bigtable_instance_admin.ListMaterializedViewsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_instance_admin.ListMaterializedViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_instance_admin.ListMaterializedViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[instance.MaterializedView]: + for page in self.pages: + yield from page.materialized_views + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMaterializedViewsAsyncPager: + """A pager for iterating through ``list_materialized_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``materialized_views`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMaterializedViews`` requests and continue to iterate + through the ``materialized_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse] + ], + request: bigtable_instance_admin.ListMaterializedViewsRequest, + response: bigtable_instance_admin.ListMaterializedViewsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = bigtable_instance_admin.ListMaterializedViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_instance_admin.ListMaterializedViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[instance.MaterializedView]: + async def async_generator(): + async for page in self.pages: + for response in page.materialized_views: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst new file mode 100644 index 000000000..9a01ee7c3 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`BigtableInstanceAdminTransport` is the ABC for all transports. +- public child `BigtableInstanceAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `BigtableInstanceAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseBigtableInstanceAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `BigtableInstanceAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py index 62da28c88..021458f35 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index d92d25453..3a05dd663 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -38,6 +39,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableInstanceAdminTransport(abc.ABC): """Abstract transport class for BigtableInstanceAdmin.""" @@ -71,15 +75,16 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -96,6 +101,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -108,7 +115,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) @@ -134,6 +141,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -372,6 +383,56 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.create_logical_view: gapic_v1.method.wrap_method( + self.create_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.get_logical_view: gapic_v1.method.wrap_method( + self.get_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.list_logical_views: gapic_v1.method.wrap_method( + self.list_logical_views, + default_timeout=None, + client_info=client_info, + ), + self.update_logical_view: gapic_v1.method.wrap_method( + self.update_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_logical_view: gapic_v1.method.wrap_method( + self.delete_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.create_materialized_view: gapic_v1.method.wrap_method( + self.create_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.get_materialized_view: gapic_v1.method.wrap_method( + self.get_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.list_materialized_views: gapic_v1.method.wrap_method( + self.list_materialized_views, + default_timeout=None, + client_info=client_info, + ), + self.update_materialized_view: gapic_v1.method.wrap_method( + self.update_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_materialized_view: gapic_v1.method.wrap_method( + self.delete_materialized_view, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -591,6 +652,102 @@ def list_hot_tablets( ]: raise NotImplementedError() + @property + def create_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateLogicalViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetLogicalViewRequest], + Union[instance.LogicalView, Awaitable[instance.LogicalView]], + ]: + raise NotImplementedError() + + @property + def list_logical_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListLogicalViewsRequest], + Union[ + bigtable_instance_admin.ListLogicalViewsResponse, + Awaitable[bigtable_instance_admin.ListLogicalViewsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateLogicalViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteLogicalViewRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def create_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateMaterializedViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetMaterializedViewRequest], + Union[instance.MaterializedView, Awaitable[instance.MaterializedView]], + ]: + raise NotImplementedError() + + @property + def list_materialized_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListMaterializedViewsRequest], + Union[ + bigtable_instance_admin.ListMaterializedViewsResponse, + 
Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateMaterializedViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteMaterializedViewRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index eca37957d..d5d5cf1e5 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -22,8 +25,11 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -33,6 +39,80 @@ from google.protobuf import empty_pb2 # type: ignore from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = 
continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert the gRPC response metadata into a dict of strings + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableInstanceAdminGrpcTransport(BigtableInstanceAdminTransport): """gRPC backend transport for BigtableInstanceAdmin. @@ -59,7 +139,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -73,20 +153,24 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can + This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -96,11 +180,11 @@ private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel.
It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -127,9 +211,10 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None @@ -168,7 +253,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -184,7 +271,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -205,9 +297,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): An optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -248,7 +341,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -280,7 +375,7 @@ def create_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each.
if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self.grpc_channel.unary_unary( + self._stubs["create_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -306,7 +401,7 @@ def get_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self.grpc_channel.unary_unary( + self._stubs["get_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, response_deserializer=instance.Instance.deserialize, @@ -335,7 +430,7 @@ def list_instances( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self.grpc_channel.unary_unary( + self._stubs["list_instances"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, @@ -362,7 +457,7 @@ def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self.grpc_channel.unary_unary( + self._stubs["update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", request_serializer=instance.Instance.serialize, response_deserializer=instance.Instance.deserialize, @@ -392,7 +487,7 @@ def partial_update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_instance" not in self._stubs: - self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -418,7 +513,7 @@ def delete_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -452,7 +547,7 @@ def create_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_cluster" not in self._stubs: - self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + self._stubs["create_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -478,7 +573,7 @@ def get_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_cluster" not in self._stubs: - self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + self._stubs["get_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, response_deserializer=instance.Cluster.deserialize, @@ -507,7 +602,7 @@ def list_clusters( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_clusters" not in self._stubs: - self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + self._stubs["list_clusters"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, @@ -535,7 +630,7 @@ def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operatio # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_cluster" not in self._stubs: - self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", request_serializer=instance.Cluster.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -576,7 +671,7 @@ def partial_update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_cluster" not in self._stubs: - self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -602,7 +697,7 @@ def delete_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_cluster" not in self._stubs: - self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + self._stubs["delete_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -630,7 +725,7 @@ def create_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_app_profile" not in self._stubs: - self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["create_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -656,7 +751,7 @@ def get_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_app_profile" not in self._stubs: - self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["get_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -685,7 +780,7 @@ def list_app_profiles( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_app_profiles" not in self._stubs: - self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + self._stubs["list_app_profiles"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, @@ -713,7 +808,7 @@ def update_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_app_profile" not in self._stubs: - self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["update_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -739,7 +834,7 @@ def delete_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_app_profile" not in self._stubs: - self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["delete_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -767,7 +862,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -794,7 +889,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -824,7 +919,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -854,15 +949,298 @@ def list_hot_tablets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_hot_tablets" not in self._stubs: - self._stubs["list_hot_tablets"] = self.grpc_channel.unary_unary( + self._stubs["list_hot_tablets"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, ) return self._stubs["list_hot_tablets"] + @property + def create_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateLogicalViewRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create logical view method over gRPC. + + Creates a logical view within an instance. + + Returns: + Callable[[~.CreateLogicalViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_logical_view" not in self._stubs: + self._stubs["create_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateLogicalView", + request_serializer=bigtable_instance_admin.CreateLogicalViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_logical_view"] + + @property + def get_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetLogicalViewRequest], instance.LogicalView + ]: + r"""Return a callable for the get logical view method over gRPC. + + Gets information about a logical view. + + Returns: + Callable[[~.GetLogicalViewRequest], + ~.LogicalView]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_logical_view" not in self._stubs: + self._stubs["get_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetLogicalView", + request_serializer=bigtable_instance_admin.GetLogicalViewRequest.serialize, + response_deserializer=instance.LogicalView.deserialize, + ) + return self._stubs["get_logical_view"] + + @property + def list_logical_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListLogicalViewsRequest], + bigtable_instance_admin.ListLogicalViewsResponse, + ]: + r"""Return a callable for the list logical views method over gRPC. + + Lists information about logical views in an instance. + + Returns: + Callable[[~.ListLogicalViewsRequest], + ~.ListLogicalViewsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_logical_views" not in self._stubs: + self._stubs["list_logical_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListLogicalViews", + request_serializer=bigtable_instance_admin.ListLogicalViewsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListLogicalViewsResponse.deserialize, + ) + return self._stubs["list_logical_views"] + + @property + def update_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateLogicalViewRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update logical view method over gRPC. + + Updates a logical view within an instance. + + Returns: + Callable[[~.UpdateLogicalViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_logical_view" not in self._stubs: + self._stubs["update_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateLogicalView", + request_serializer=bigtable_instance_admin.UpdateLogicalViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_logical_view"] + + @property + def delete_logical_view( + self, + ) -> Callable[[bigtable_instance_admin.DeleteLogicalViewRequest], empty_pb2.Empty]: + r"""Return a callable for the delete logical view method over gRPC. + + Deletes a logical view from an instance. + + Returns: + Callable[[~.DeleteLogicalViewRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_logical_view" not in self._stubs: + self._stubs["delete_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteLogicalView", + request_serializer=bigtable_instance_admin.DeleteLogicalViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_logical_view"] + + @property + def create_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateMaterializedViewRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the create materialized view method over gRPC. + + Creates a materialized view within an instance. + + Returns: + Callable[[~.CreateMaterializedViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_materialized_view" not in self._stubs: + self._stubs["create_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateMaterializedView", + request_serializer=bigtable_instance_admin.CreateMaterializedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_materialized_view"] + + @property + def get_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetMaterializedViewRequest], instance.MaterializedView + ]: + r"""Return a callable for the get materialized view method over gRPC. + + Gets information about a materialized view. + + Returns: + Callable[[~.GetMaterializedViewRequest], + ~.MaterializedView]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_materialized_view" not in self._stubs: + self._stubs["get_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetMaterializedView", + request_serializer=bigtable_instance_admin.GetMaterializedViewRequest.serialize, + response_deserializer=instance.MaterializedView.deserialize, + ) + return self._stubs["get_materialized_view"] + + @property + def list_materialized_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListMaterializedViewsRequest], + bigtable_instance_admin.ListMaterializedViewsResponse, + ]: + r"""Return a callable for the list materialized views method over gRPC. + + Lists information about materialized views in an + instance. + + Returns: + Callable[[~.ListMaterializedViewsRequest], + ~.ListMaterializedViewsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_materialized_views" not in self._stubs: + self._stubs["list_materialized_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListMaterializedViews", + request_serializer=bigtable_instance_admin.ListMaterializedViewsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListMaterializedViewsResponse.deserialize, + ) + return self._stubs["list_materialized_views"] + + @property + def update_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateMaterializedViewRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the update materialized view method over gRPC. + + Updates a materialized view within an instance. + + Returns: + Callable[[~.UpdateMaterializedViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_materialized_view" not in self._stubs: + self._stubs["update_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateMaterializedView", + request_serializer=bigtable_instance_admin.UpdateMaterializedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_materialized_view"] + + @property + def delete_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteMaterializedViewRequest], empty_pb2.Empty + ]: + r"""Return a callable for the delete materialized view method over gRPC. + + Deletes a materialized view from an instance. + + Returns: + Callable[[~.DeleteMaterializedViewRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_materialized_view" not in self._stubs: + self._stubs["delete_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteMaterializedView", + request_serializer=bigtable_instance_admin.DeleteMaterializedViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_materialized_view"] + def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 145aa427d..7ce762764 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,16 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import inspect +import json +import pickle +import logging as std_logging +import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin @@ -34,6 +43,82 @@ from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableInstanceAdminGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert the gRPC response metadata into a dict of strings + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): """gRPC AsyncIO backend transport for BigtableInstanceAdmin.
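Both the sync and async interceptors above only assemble and emit these records when ``google.api_core.client_logging`` is importable and the module logger is enabled for DEBUG, so the payload serialization cost is skipped by default. A sketch of opting in (assumption: the logger names follow the transport module paths, since ``_LOGGER`` is created from ``__name__``):

import logging

# Enabling DEBUG on the transports package covers both the sync (grpc.py)
# and async (grpc_asyncio.py) interceptor loggers.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger(
    "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports"
).setLevel(logging.DEBUG)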
@@ -72,9 +157,9 @@ credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): An optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -104,7 +189,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -118,21 +203,25 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): An optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. + channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -142,11 +231,11 @@ private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format.
It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -173,9 +262,10 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None @@ -213,7 +303,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -229,7 +321,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -252,7 +350,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -286,7 +384,7 @@ def create_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self.grpc_channel.unary_unary( + self._stubs["create_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -314,7 +412,7 @@ def get_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self.grpc_channel.unary_unary( + self._stubs["get_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, response_deserializer=instance.Instance.deserialize, @@ -343,7 +441,7 @@ def list_instances( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self.grpc_channel.unary_unary( + self._stubs["list_instances"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, @@ -372,7 +470,7 @@ def update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self.grpc_channel.unary_unary( + self._stubs["update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", request_serializer=instance.Instance.serialize, response_deserializer=instance.Instance.deserialize, @@ -403,7 +501,7 @@ def partial_update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_instance" not in self._stubs: - self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -431,7 +529,7 @@ def delete_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -466,7 +564,7 @@ def create_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_cluster" not in self._stubs: - self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + self._stubs["create_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -494,7 +592,7 @@ def get_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_cluster" not in self._stubs: - self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + self._stubs["get_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, response_deserializer=instance.Cluster.deserialize, @@ -523,7 +621,7 @@ def list_clusters( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_clusters" not in self._stubs: - self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + self._stubs["list_clusters"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, @@ -553,7 +651,7 @@ def update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_cluster" not in self._stubs: - self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", request_serializer=instance.Cluster.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -595,7 +693,7 @@ def partial_update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_cluster" not in self._stubs: - self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -623,7 +721,7 @@ def delete_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_cluster" not in self._stubs: - self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + self._stubs["delete_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -652,7 +750,7 @@ def create_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_app_profile" not in self._stubs: - self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["create_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -680,7 +778,7 @@ def get_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_app_profile" not in self._stubs: - self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["get_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -709,7 +807,7 @@ def list_app_profiles( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_app_profiles" not in self._stubs: - self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + self._stubs["list_app_profiles"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, @@ -738,7 +836,7 @@ def update_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_app_profile" not in self._stubs: - self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["update_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -766,7 +864,7 @@ def delete_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_app_profile" not in self._stubs: - self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["delete_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -794,7 +892,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -821,7 +919,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -851,7 +949,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -881,15 +979,603 @@ def list_hot_tablets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_hot_tablets" not in self._stubs: - self._stubs["list_hot_tablets"] = self.grpc_channel.unary_unary( + self._stubs["list_hot_tablets"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, ) return self._stubs["list_hot_tablets"] + @property + def create_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateLogicalViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create logical view method over gRPC. + + Creates a logical view within an instance. + + Returns: + Callable[[~.CreateLogicalViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_logical_view" not in self._stubs: + self._stubs["create_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateLogicalView", + request_serializer=bigtable_instance_admin.CreateLogicalViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_logical_view"] + + @property + def get_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetLogicalViewRequest], Awaitable[instance.LogicalView] + ]: + r"""Return a callable for the get logical view method over gRPC. + + Gets information about a logical view. + + Returns: + Callable[[~.GetLogicalViewRequest], + Awaitable[~.LogicalView]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_logical_view" not in self._stubs: + self._stubs["get_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetLogicalView", + request_serializer=bigtable_instance_admin.GetLogicalViewRequest.serialize, + response_deserializer=instance.LogicalView.deserialize, + ) + return self._stubs["get_logical_view"] + + @property + def list_logical_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListLogicalViewsRequest], + Awaitable[bigtable_instance_admin.ListLogicalViewsResponse], + ]: + r"""Return a callable for the list logical views method over gRPC. + + Lists information about logical views in an instance. + + Returns: + Callable[[~.ListLogicalViewsRequest], + Awaitable[~.ListLogicalViewsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_logical_views" not in self._stubs: + self._stubs["list_logical_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListLogicalViews", + request_serializer=bigtable_instance_admin.ListLogicalViewsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListLogicalViewsResponse.deserialize, + ) + return self._stubs["list_logical_views"] + + @property + def update_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateLogicalViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update logical view method over gRPC. + + Updates a logical view within an instance. + + Returns: + Callable[[~.UpdateLogicalViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_logical_view" not in self._stubs: + self._stubs["update_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateLogicalView", + request_serializer=bigtable_instance_admin.UpdateLogicalViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_logical_view"] + + @property + def delete_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteLogicalViewRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete logical view method over gRPC. + + Deletes a logical view from an instance. + + Returns: + Callable[[~.DeleteLogicalViewRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_logical_view" not in self._stubs: + self._stubs["delete_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteLogicalView", + request_serializer=bigtable_instance_admin.DeleteLogicalViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_logical_view"] + + @property + def create_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateMaterializedViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create materialized view method over gRPC. + + Creates a materialized view within an instance. + + Returns: + Callable[[~.CreateMaterializedViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_materialized_view" not in self._stubs: + self._stubs["create_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateMaterializedView", + request_serializer=bigtable_instance_admin.CreateMaterializedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_materialized_view"] + + @property + def get_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetMaterializedViewRequest], + Awaitable[instance.MaterializedView], + ]: + r"""Return a callable for the get materialized view method over gRPC. + + Gets information about a materialized view. + + Returns: + Callable[[~.GetMaterializedViewRequest], + Awaitable[~.MaterializedView]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_materialized_view" not in self._stubs: + self._stubs["get_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetMaterializedView", + request_serializer=bigtable_instance_admin.GetMaterializedViewRequest.serialize, + response_deserializer=instance.MaterializedView.deserialize, + ) + return self._stubs["get_materialized_view"] + + @property + def list_materialized_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListMaterializedViewsRequest], + Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse], + ]: + r"""Return a callable for the list materialized views method over gRPC. + + Lists information about materialized views in an + instance. + + Returns: + Callable[[~.ListMaterializedViewsRequest], + Awaitable[~.ListMaterializedViewsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_materialized_views" not in self._stubs: + self._stubs["list_materialized_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListMaterializedViews", + request_serializer=bigtable_instance_admin.ListMaterializedViewsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListMaterializedViewsResponse.deserialize, + ) + return self._stubs["list_materialized_views"] + + @property + def update_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateMaterializedViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update materialized view method over gRPC. + + Updates a materialized view within an instance. + + Returns: + Callable[[~.UpdateMaterializedViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_materialized_view" not in self._stubs: + self._stubs["update_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateMaterializedView", + request_serializer=bigtable_instance_admin.UpdateMaterializedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_materialized_view"] + + @property + def delete_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteMaterializedViewRequest], + Awaitable[empty_pb2.Empty], + ]: + r"""Return a callable for the delete materialized view method over gRPC. + + Deletes a materialized view from an instance. + + Returns: + Callable[[~.DeleteMaterializedViewRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_materialized_view" not in self._stubs: + self._stubs["delete_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteMaterializedView", + request_serializer=bigtable_instance_admin.DeleteMaterializedViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_materialized_view"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.create_instance: self._wrap_method( + self.create_instance, + default_timeout=300.0, + client_info=client_info, + ), + self.get_instance: self._wrap_method( + self.get_instance, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_instances: self._wrap_method( + self.list_instances, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_instance: self._wrap_method( + self.update_instance, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.partial_update_instance: self._wrap_method( + self.partial_update_instance, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_instance: self._wrap_method( + self.delete_instance, + default_timeout=60.0, + client_info=client_info, + ), + self.create_cluster: self._wrap_method( + self.create_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.get_cluster: self._wrap_method( + self.get_cluster, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + 
predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_clusters: self._wrap_method( + self.list_clusters, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_cluster: self._wrap_method( + self.update_cluster, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.partial_update_cluster: self._wrap_method( + self.partial_update_cluster, + default_timeout=None, + client_info=client_info, + ), + self.delete_cluster: self._wrap_method( + self.delete_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.create_app_profile: self._wrap_method( + self.create_app_profile, + default_timeout=60.0, + client_info=client_info, + ), + self.get_app_profile: self._wrap_method( + self.get_app_profile, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_app_profiles: self._wrap_method( + self.list_app_profiles, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_app_profile: self._wrap_method( + self.update_app_profile, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_app_profile: self._wrap_method( + self.delete_app_profile, + default_timeout=60.0, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=60.0, + client_info=client_info, + ), + self.test_iam_permissions: self._wrap_method( + self.test_iam_permissions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_hot_tablets: self._wrap_method( + self.list_hot_tablets, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + 
), + default_timeout=60.0, + client_info=client_info, + ), + self.create_logical_view: self._wrap_method( + self.create_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.get_logical_view: self._wrap_method( + self.get_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.list_logical_views: self._wrap_method( + self.list_logical_views, + default_timeout=None, + client_info=client_info, + ), + self.update_logical_view: self._wrap_method( + self.update_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_logical_view: self._wrap_method( + self.delete_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.create_materialized_view: self._wrap_method( + self.create_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.get_materialized_view: self._wrap_method( + self.get_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.list_materialized_views: self._wrap_method( + self.list_materialized_views, + default_timeout=None, + client_info=client_info, + ), + self.update_materialized_view: self._wrap_method( + self.update_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_materialized_view: self._wrap_method( + self.delete_materialized_view, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc_asyncio" __all__ = ("BigtableInstanceAdminGrpcAsyncIOTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 9d5502b7e..9879c4c45 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,32 +13,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
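# Editor's note (not part of the diff): the AsyncRetry policies configured in
# ``_prep_wrapped_messages`` above all share one shape — exponential backoff
# starting at 1s, doubling per attempt, capped at 60s, retrying only
# DeadlineExceeded/ServiceUnavailable, under a 60s overall deadline. A sketch
# of the delay sequence those parameters imply, ignoring the random jitter
# that google-api-core applies in practice:
def backoff_delays(initial=1.0, maximum=60.0, multiplier=2.0, deadline=60.0):
    """Yield sleep intervals until the overall deadline would be exceeded."""
    delay, elapsed = initial, 0.0
    while elapsed + delay <= deadline:
        yield delay
        elapsed += delay
        delay = min(delay * multiplier, maximum)

# list(backoff_delays()) == [1.0, 2.0, 4.0, 8.0, 16.0]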
# +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries from google.api_core import rest_helpers from google.api_core import rest_streaming -from google.api_core import path_template from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 + from requests import __version__ as requests_version import dataclasses -import re from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -47,18 +41,33 @@ from google.protobuf import empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from .base import ( - BigtableInstanceAdminTransport, - DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, -) +from .rest_base import _BaseBigtableInstanceAdminRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, grpc_version=None, - rest_version=requests_version, + rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableInstanceAdminRestInterceptor: """Interceptor for BigtableInstanceAdmin. 
@@ -99,6 +108,22 @@ def post_create_instance(self, response): logging.log(f"Received response: {response}") return response + def pre_create_logical_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_logical_view(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_materialized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_materialized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_delete_app_profile(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -111,6 +136,14 @@ def pre_delete_instance(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata + def pre_delete_logical_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_materialized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + def pre_get_app_profile(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -143,6 +176,22 @@ def post_get_instance(self, response): logging.log(f"Received response: {response}") return response + def pre_get_logical_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_logical_view(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_materialized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_materialized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_app_profiles(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -175,6 +224,22 @@ def post_list_instances(self, response): logging.log(f"Received response: {response}") return response + def pre_list_logical_views(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_logical_views(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_materialized_views(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_materialized_views(self, response): + logging.log(f"Received response: {response}") + return response + def pre_partial_update_cluster(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -231,6 +296,22 @@ def post_update_instance(self, response): logging.log(f"Received response: {response}") return response + def pre_update_logical_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_logical_view(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_materialized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_materialized_view(self, response): + logging.log(f"Received response: {response}") + return response + transport = BigtableInstanceAdminRestTransport(interceptor=MyCustomBigtableInstanceAdminInterceptor()) client = BigtableInstanceAdminClient(transport=transport) @@ 
-240,9 +321,10 @@ def post_update_instance(self, response): def pre_create_app_profile( self, request: bigtable_instance_admin.CreateAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.CreateAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.CreateAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_app_profile @@ -256,17 +338,43 @@ def post_create_app_profile( ) -> instance.AppProfile: """Post-rpc interceptor for create_app_profile - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_app_profile_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_app_profile` interceptor runs + before the `post_create_app_profile_with_metadata` interceptor. """ return response + def post_create_app_profile_with_metadata( + self, + response: instance.AppProfile, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.AppProfile, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_app_profile + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_app_profile_with_metadata` + interceptor in new development instead of the `post_create_app_profile` interceptor. + When both interceptors are used, this `post_create_app_profile_with_metadata` interceptor runs after the + `post_create_app_profile` interceptor. The (possibly modified) response returned by + `post_create_app_profile` will be passed to + `post_create_app_profile_with_metadata`. + """ + return response, metadata + def pre_create_cluster( self, request: bigtable_instance_admin.CreateClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.CreateClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.CreateClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for create_cluster Override in a subclass to manipulate the request or metadata @@ -279,18 +387,42 @@ def post_create_cluster( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_cluster - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_cluster` interceptor runs + before the `post_create_cluster_with_metadata` interceptor. """ return response + def post_create_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_create_cluster_with_metadata` + interceptor in new development instead of the `post_create_cluster` interceptor. + When both interceptors are used, this `post_create_cluster_with_metadata` interceptor runs after the + `post_create_cluster` interceptor. The (possibly modified) response returned by + `post_create_cluster` will be passed to + `post_create_cluster_with_metadata`. + """ + return response, metadata + def pre_create_instance( self, request: bigtable_instance_admin.CreateInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.CreateInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.CreateInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_instance @@ -304,18 +436,140 @@ def post_create_instance( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_create_instance` interceptor runs + before the `post_create_instance_with_metadata` interceptor. + """ + return response + + def post_create_instance_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_instance_with_metadata` + interceptor in new development instead of the `post_create_instance` interceptor. + When both interceptors are used, this `post_create_instance_with_metadata` interceptor runs after the + `post_create_instance` interceptor. The (possibly modified) response returned by + `post_create_instance` will be passed to + `post_create_instance_with_metadata`. + """ + return response, metadata + + def pre_create_logical_view( + self, + request: bigtable_instance_admin.CreateLogicalViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.CreateLogicalViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_logical_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_create_logical_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_logical_view + + DEPRECATED. Please use the `post_create_logical_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_create_logical_view` interceptor runs + before the `post_create_logical_view_with_metadata` interceptor. 
+ """ + return response + + def post_create_logical_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_logical_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_logical_view_with_metadata` + interceptor in new development instead of the `post_create_logical_view` interceptor. + When both interceptors are used, this `post_create_logical_view_with_metadata` interceptor runs after the + `post_create_logical_view` interceptor. The (possibly modified) response returned by + `post_create_logical_view` will be passed to + `post_create_logical_view_with_metadata`. + """ + return response, metadata + + def pre_create_materialized_view( + self, + request: bigtable_instance_admin.CreateMaterializedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.CreateMaterializedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_materialized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_create_materialized_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_materialized_view + + DEPRECATED. Please use the `post_create_materialized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_materialized_view` interceptor runs + before the `post_create_materialized_view_with_metadata` interceptor. """ return response + def post_create_materialized_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_materialized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_materialized_view_with_metadata` + interceptor in new development instead of the `post_create_materialized_view` interceptor. + When both interceptors are used, this `post_create_materialized_view_with_metadata` interceptor runs after the + `post_create_materialized_view` interceptor. The (possibly modified) response returned by + `post_create_materialized_view` will be passed to + `post_create_materialized_view_with_metadata`. 
+ """ + return response, metadata + def pre_delete_app_profile( self, request: bigtable_instance_admin.DeleteAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.DeleteAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.DeleteAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_app_profile @@ -327,8 +581,11 @@ def pre_delete_app_profile( def pre_delete_cluster( self, request: bigtable_instance_admin.DeleteClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.DeleteClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.DeleteClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_cluster Override in a subclass to manipulate the request or metadata @@ -339,9 +596,10 @@ def pre_delete_cluster( def pre_delete_instance( self, request: bigtable_instance_admin.DeleteInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.DeleteInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.DeleteInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_instance @@ -350,11 +608,44 @@ def pre_delete_instance( """ return request, metadata + def pre_delete_logical_view( + self, + request: bigtable_instance_admin.DeleteLogicalViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.DeleteLogicalViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_logical_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def pre_delete_materialized_view( + self, + request: bigtable_instance_admin.DeleteMaterializedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.DeleteMaterializedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_materialized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + def pre_get_app_profile( self, request: bigtable_instance_admin.GetAppProfileRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetAppProfileRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_app_profile Override in a subclass to manipulate the request or metadata @@ -367,17 +658,43 @@ def post_get_app_profile( ) -> instance.AppProfile: """Post-rpc interceptor for get_app_profile - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_app_profile_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_app_profile` interceptor runs + before the `post_get_app_profile_with_metadata` interceptor. 
""" return response + def post_get_app_profile_with_metadata( + self, + response: instance.AppProfile, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.AppProfile, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_app_profile + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_app_profile_with_metadata` + interceptor in new development instead of the `post_get_app_profile` interceptor. + When both interceptors are used, this `post_get_app_profile_with_metadata` interceptor runs after the + `post_get_app_profile` interceptor. The (possibly modified) response returned by + `post_get_app_profile` will be passed to + `post_get_app_profile_with_metadata`. + """ + return response, metadata + def pre_get_cluster( self, request: bigtable_instance_admin.GetClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_cluster Override in a subclass to manipulate the request or metadata @@ -388,17 +705,42 @@ def pre_get_cluster( def post_get_cluster(self, response: instance.Cluster) -> instance.Cluster: """Post-rpc interceptor for get_cluster - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_cluster` interceptor runs + before the `post_get_cluster_with_metadata` interceptor. """ return response + def post_get_cluster_with_metadata( + self, + response: instance.Cluster, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Cluster, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_cluster_with_metadata` + interceptor in new development instead of the `post_get_cluster` interceptor. + When both interceptors are used, this `post_get_cluster_with_metadata` interceptor runs after the + `post_get_cluster` interceptor. The (possibly modified) response returned by + `post_get_cluster` will be passed to + `post_get_cluster_with_metadata`. + """ + return response, metadata + def pre_get_iam_policy( self, request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_iam_policy Override in a subclass to manipulate the request or metadata @@ -409,17 +751,43 @@ def pre_get_iam_policy( def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for get_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_get_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_iam_policy` interceptor runs + before the `post_get_iam_policy_with_metadata` interceptor. """ return response + def post_get_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_iam_policy_with_metadata` + interceptor in new development instead of the `post_get_iam_policy` interceptor. + When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the + `post_get_iam_policy` interceptor. The (possibly modified) response returned by + `post_get_iam_policy` will be passed to + `post_get_iam_policy_with_metadata`. + """ + return response, metadata + def pre_get_instance( self, request: bigtable_instance_admin.GetInstanceRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetInstanceRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_instance Override in a subclass to manipulate the request or metadata @@ -430,18 +798,140 @@ def pre_get_instance( def post_get_instance(self, response: instance.Instance) -> instance.Instance: """Post-rpc interceptor for get_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_get_instance` interceptor runs + before the `post_get_instance_with_metadata` interceptor. + """ + return response + + def post_get_instance_with_metadata( + self, + response: instance.Instance, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_instance_with_metadata` + interceptor in new development instead of the `post_get_instance` interceptor. + When both interceptors are used, this `post_get_instance_with_metadata` interceptor runs after the + `post_get_instance` interceptor. The (possibly modified) response returned by + `post_get_instance` will be passed to + `post_get_instance_with_metadata`. 
+ """ + return response, metadata + + def pre_get_logical_view( + self, + request: bigtable_instance_admin.GetLogicalViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetLogicalViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_logical_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_logical_view( + self, response: instance.LogicalView + ) -> instance.LogicalView: + """Post-rpc interceptor for get_logical_view + + DEPRECATED. Please use the `post_get_logical_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_get_logical_view` interceptor runs + before the `post_get_logical_view_with_metadata` interceptor. + """ + return response + + def post_get_logical_view_with_metadata( + self, + response: instance.LogicalView, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.LogicalView, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_logical_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_logical_view_with_metadata` + interceptor in new development instead of the `post_get_logical_view` interceptor. + When both interceptors are used, this `post_get_logical_view_with_metadata` interceptor runs after the + `post_get_logical_view` interceptor. The (possibly modified) response returned by + `post_get_logical_view` will be passed to + `post_get_logical_view_with_metadata`. + """ + return response, metadata + + def pre_get_materialized_view( + self, + request: bigtable_instance_admin.GetMaterializedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetMaterializedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_materialized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_materialized_view( + self, response: instance.MaterializedView + ) -> instance.MaterializedView: + """Post-rpc interceptor for get_materialized_view + + DEPRECATED. Please use the `post_get_materialized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_materialized_view` interceptor runs + before the `post_get_materialized_view_with_metadata` interceptor. """ return response + def post_get_materialized_view_with_metadata( + self, + response: instance.MaterializedView, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.MaterializedView, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_materialized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_get_materialized_view_with_metadata` + interceptor in new development instead of the `post_get_materialized_view` interceptor. + When both interceptors are used, this `post_get_materialized_view_with_metadata` interceptor runs after the + `post_get_materialized_view` interceptor. The (possibly modified) response returned by + `post_get_materialized_view` will be passed to + `post_get_materialized_view_with_metadata`. + """ + return response, metadata + def pre_list_app_profiles( self, request: bigtable_instance_admin.ListAppProfilesRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.ListAppProfilesRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.ListAppProfilesRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_app_profiles @@ -455,17 +945,46 @@ def post_list_app_profiles( ) -> bigtable_instance_admin.ListAppProfilesResponse: """Post-rpc interceptor for list_app_profiles - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_app_profiles_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_app_profiles` interceptor runs + before the `post_list_app_profiles_with_metadata` interceptor. """ return response + def post_list_app_profiles_with_metadata( + self, + response: bigtable_instance_admin.ListAppProfilesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListAppProfilesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_app_profiles + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_app_profiles_with_metadata` + interceptor in new development instead of the `post_list_app_profiles` interceptor. + When both interceptors are used, this `post_list_app_profiles_with_metadata` interceptor runs after the + `post_list_app_profiles` interceptor. The (possibly modified) response returned by + `post_list_app_profiles` will be passed to + `post_list_app_profiles_with_metadata`. + """ + return response, metadata + def pre_list_clusters( self, request: bigtable_instance_admin.ListClustersRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.ListClustersRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListClustersRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_clusters Override in a subclass to manipulate the request or metadata @@ -478,18 +997,45 @@ def post_list_clusters( ) -> bigtable_instance_admin.ListClustersResponse: """Post-rpc interceptor for list_clusters - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_clusters_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_clusters` interceptor runs + before the `post_list_clusters_with_metadata` interceptor. 
""" return response + def post_list_clusters_with_metadata( + self, + response: bigtable_instance_admin.ListClustersResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListClustersResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_clusters + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_clusters_with_metadata` + interceptor in new development instead of the `post_list_clusters` interceptor. + When both interceptors are used, this `post_list_clusters_with_metadata` interceptor runs after the + `post_list_clusters` interceptor. The (possibly modified) response returned by + `post_list_clusters` will be passed to + `post_list_clusters_with_metadata`. + """ + return response, metadata + def pre_list_hot_tablets( self, request: bigtable_instance_admin.ListHotTabletsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.ListHotTabletsRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.ListHotTabletsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_hot_tablets @@ -503,17 +1049,46 @@ def post_list_hot_tablets( ) -> bigtable_instance_admin.ListHotTabletsResponse: """Post-rpc interceptor for list_hot_tablets - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_hot_tablets_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_hot_tablets` interceptor runs + before the `post_list_hot_tablets_with_metadata` interceptor. """ return response + def post_list_hot_tablets_with_metadata( + self, + response: bigtable_instance_admin.ListHotTabletsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListHotTabletsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_hot_tablets + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_hot_tablets_with_metadata` + interceptor in new development instead of the `post_list_hot_tablets` interceptor. + When both interceptors are used, this `post_list_hot_tablets_with_metadata` interceptor runs after the + `post_list_hot_tablets` interceptor. The (possibly modified) response returned by + `post_list_hot_tablets` will be passed to + `post_list_hot_tablets_with_metadata`. 
+ """ + return response, metadata + def pre_list_instances( self, request: bigtable_instance_admin.ListInstancesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.ListInstancesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListInstancesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_instances Override in a subclass to manipulate the request or metadata @@ -526,18 +1101,149 @@ def post_list_instances( ) -> bigtable_instance_admin.ListInstancesResponse: """Post-rpc interceptor for list_instances - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_instances_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_list_instances` interceptor runs + before the `post_list_instances_with_metadata` interceptor. + """ + return response + + def post_list_instances_with_metadata( + self, + response: bigtable_instance_admin.ListInstancesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListInstancesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_instances + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_instances_with_metadata` + interceptor in new development instead of the `post_list_instances` interceptor. + When both interceptors are used, this `post_list_instances_with_metadata` interceptor runs after the + `post_list_instances` interceptor. The (possibly modified) response returned by + `post_list_instances` will be passed to + `post_list_instances_with_metadata`. + """ + return response, metadata + + def pre_list_logical_views( + self, + request: bigtable_instance_admin.ListLogicalViewsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListLogicalViewsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_logical_views + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_logical_views( + self, response: bigtable_instance_admin.ListLogicalViewsResponse + ) -> bigtable_instance_admin.ListLogicalViewsResponse: + """Post-rpc interceptor for list_logical_views + + DEPRECATED. Please use the `post_list_logical_views_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_list_logical_views` interceptor runs + before the `post_list_logical_views_with_metadata` interceptor. 
+ """ + return response + + def post_list_logical_views_with_metadata( + self, + response: bigtable_instance_admin.ListLogicalViewsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListLogicalViewsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_logical_views + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_logical_views_with_metadata` + interceptor in new development instead of the `post_list_logical_views` interceptor. + When both interceptors are used, this `post_list_logical_views_with_metadata` interceptor runs after the + `post_list_logical_views` interceptor. The (possibly modified) response returned by + `post_list_logical_views` will be passed to + `post_list_logical_views_with_metadata`. + """ + return response, metadata + + def pre_list_materialized_views( + self, + request: bigtable_instance_admin.ListMaterializedViewsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListMaterializedViewsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_materialized_views + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_materialized_views( + self, response: bigtable_instance_admin.ListMaterializedViewsResponse + ) -> bigtable_instance_admin.ListMaterializedViewsResponse: + """Post-rpc interceptor for list_materialized_views + + DEPRECATED. Please use the `post_list_materialized_views_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_materialized_views` interceptor runs + before the `post_list_materialized_views_with_metadata` interceptor. """ return response + def post_list_materialized_views_with_metadata( + self, + response: bigtable_instance_admin.ListMaterializedViewsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListMaterializedViewsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_materialized_views + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_materialized_views_with_metadata` + interceptor in new development instead of the `post_list_materialized_views` interceptor. + When both interceptors are used, this `post_list_materialized_views_with_metadata` interceptor runs after the + `post_list_materialized_views` interceptor. The (possibly modified) response returned by + `post_list_materialized_views` will be passed to + `post_list_materialized_views_with_metadata`. 
+ """ + return response, metadata + def pre_partial_update_cluster( self, request: bigtable_instance_admin.PartialUpdateClusterRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.PartialUpdateClusterRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.PartialUpdateClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for partial_update_cluster @@ -551,18 +1257,42 @@ def post_partial_update_cluster( ) -> operations_pb2.Operation: """Post-rpc interceptor for partial_update_cluster - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_partial_update_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_partial_update_cluster` interceptor runs + before the `post_partial_update_cluster_with_metadata` interceptor. """ return response + def post_partial_update_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for partial_update_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_partial_update_cluster_with_metadata` + interceptor in new development instead of the `post_partial_update_cluster` interceptor. + When both interceptors are used, this `post_partial_update_cluster_with_metadata` interceptor runs after the + `post_partial_update_cluster` interceptor. The (possibly modified) response returned by + `post_partial_update_cluster` will be passed to + `post_partial_update_cluster_with_metadata`. + """ + return response, metadata + def pre_partial_update_instance( self, request: bigtable_instance_admin.PartialUpdateInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.PartialUpdateInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.PartialUpdateInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for partial_update_instance @@ -576,17 +1306,42 @@ def post_partial_update_instance( ) -> operations_pb2.Operation: """Post-rpc interceptor for partial_update_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_partial_update_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_partial_update_instance` interceptor runs + before the `post_partial_update_instance_with_metadata` interceptor. 
""" return response + def post_partial_update_instance_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for partial_update_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_partial_update_instance_with_metadata` + interceptor in new development instead of the `post_partial_update_instance` interceptor. + When both interceptors are used, this `post_partial_update_instance_with_metadata` interceptor runs after the + `post_partial_update_instance` interceptor. The (possibly modified) response returned by + `post_partial_update_instance` will be passed to + `post_partial_update_instance_with_metadata`. + """ + return response, metadata + def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for set_iam_policy Override in a subclass to manipulate the request or metadata @@ -597,17 +1352,43 @@ def pre_set_iam_policy( def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for set_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_set_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_set_iam_policy` interceptor runs + before the `post_set_iam_policy_with_metadata` interceptor. """ return response + def post_set_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_set_iam_policy_with_metadata` + interceptor in new development instead of the `post_set_iam_policy` interceptor. + When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the + `post_set_iam_policy` interceptor. The (possibly modified) response returned by + `post_set_iam_policy` will be passed to + `post_set_iam_policy_with_metadata`. 
+ """ + return response, metadata + def pre_test_iam_permissions( self, request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for test_iam_permissions Override in a subclass to manipulate the request or metadata @@ -620,18 +1401,45 @@ def post_test_iam_permissions( ) -> iam_policy_pb2.TestIamPermissionsResponse: """Post-rpc interceptor for test_iam_permissions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_test_iam_permissions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_test_iam_permissions` interceptor runs + before the `post_test_iam_permissions_with_metadata` interceptor. """ return response + def post_test_iam_permissions_with_metadata( + self, + response: iam_policy_pb2.TestIamPermissionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_test_iam_permissions_with_metadata` + interceptor in new development instead of the `post_test_iam_permissions` interceptor. + When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the + `post_test_iam_permissions` interceptor. The (possibly modified) response returned by + `post_test_iam_permissions` will be passed to + `post_test_iam_permissions_with_metadata`. + """ + return response, metadata + def pre_update_app_profile( self, request: bigtable_instance_admin.UpdateAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.UpdateAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.UpdateAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_app_profile @@ -645,15 +1453,40 @@ def post_update_app_profile( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_app_profile - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_app_profile_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_app_profile` interceptor runs + before the `post_update_app_profile_with_metadata` interceptor. 
""" return response + def post_update_app_profile_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_app_profile + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_app_profile_with_metadata` + interceptor in new development instead of the `post_update_app_profile` interceptor. + When both interceptors are used, this `post_update_app_profile_with_metadata` interceptor runs after the + `post_update_app_profile` interceptor. The (possibly modified) response returned by + `post_update_app_profile` will be passed to + `post_update_app_profile_with_metadata`. + """ + return response, metadata + def pre_update_cluster( - self, request: instance.Cluster, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[instance.Cluster, Sequence[Tuple[str, str]]]: + self, + request: instance.Cluster, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Cluster, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_cluster Override in a subclass to manipulate the request or metadata @@ -666,15 +1499,40 @@ def post_update_cluster( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_cluster - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_cluster` interceptor runs + before the `post_update_cluster_with_metadata` interceptor. """ return response + def post_update_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_cluster_with_metadata` + interceptor in new development instead of the `post_update_cluster` interceptor. + When both interceptors are used, this `post_update_cluster_with_metadata` interceptor runs after the + `post_update_cluster` interceptor. The (possibly modified) response returned by + `post_update_cluster` will be passed to + `post_update_cluster_with_metadata`. + """ + return response, metadata + def pre_update_instance( - self, request: instance.Instance, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[instance.Instance, Sequence[Tuple[str, str]]]: + self, + request: instance.Instance, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_instance Override in a subclass to manipulate the request or metadata @@ -685,12 +1543,133 @@ def pre_update_instance( def post_update_instance(self, response: instance.Instance) -> instance.Instance: """Post-rpc interceptor for update_instance - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_update_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_update_instance` interceptor runs + before the `post_update_instance_with_metadata` interceptor. + """ + return response + + def post_update_instance_with_metadata( + self, + response: instance.Instance, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_instance_with_metadata` + interceptor in new development instead of the `post_update_instance` interceptor. + When both interceptors are used, this `post_update_instance_with_metadata` interceptor runs after the + `post_update_instance` interceptor. The (possibly modified) response returned by + `post_update_instance` will be passed to + `post_update_instance_with_metadata`. + """ + return response, metadata + + def pre_update_logical_view( + self, + request: bigtable_instance_admin.UpdateLogicalViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.UpdateLogicalViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_logical_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_logical_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_logical_view + + DEPRECATED. Please use the `post_update_logical_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_update_logical_view` interceptor runs + before the `post_update_logical_view_with_metadata` interceptor. + """ + return response + + def post_update_logical_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_logical_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_logical_view_with_metadata` + interceptor in new development instead of the `post_update_logical_view` interceptor. + When both interceptors are used, this `post_update_logical_view_with_metadata` interceptor runs after the + `post_update_logical_view` interceptor. The (possibly modified) response returned by + `post_update_logical_view` will be passed to + `post_update_logical_view_with_metadata`. 
+ """ + return response, metadata + + def pre_update_materialized_view( + self, + request: bigtable_instance_admin.UpdateMaterializedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.UpdateMaterializedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_materialized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_materialized_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_materialized_view + + DEPRECATED. Please use the `post_update_materialized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_materialized_view` interceptor runs + before the `post_update_materialized_view_with_metadata` interceptor. """ return response + def post_update_materialized_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_materialized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_materialized_view_with_metadata` + interceptor in new development instead of the `post_update_materialized_view` interceptor. + When both interceptors are used, this `post_update_materialized_view_with_metadata` interceptor runs after the + `post_update_materialized_view` interceptor. The (possibly modified) response returned by + `post_update_materialized_view` will be passed to + `post_update_materialized_view_with_metadata`. + """ + return response, metadata + @dataclasses.dataclass class BigtableInstanceAdminRestStub: @@ -699,8 +1678,8 @@ class BigtableInstanceAdminRestStub: _interceptor: BigtableInstanceAdminRestInterceptor -class BigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): - """REST backend transport for BigtableInstanceAdmin. +class BigtableInstanceAdminRestTransport(_BaseBigtableInstanceAdminRestTransport): + """REST backend synchronous transport for BigtableInstanceAdmin. Service for creating, configuring, and deleting Cloud Bigtable Instances and Clusters. Provides access to the Instance @@ -712,7 +1691,6 @@ class BigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): and call it. It sends JSON representations of protocol buffers over HTTP/1.1 - """ def __init__( @@ -734,16 +1712,17 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. 
- This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -766,21 +1745,12 @@ def __init__( # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError( - f"Unexpected hostname structure: {host}" - ) # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - super().__init__( host=host, credentials=credentials, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, api_audience=api_audience, ) self._session = AuthorizedSession( @@ -844,21 +1814,35 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: # Return the client from cache. return self._operations_client - class _CreateAppProfile(BigtableInstanceAdminRestStub): + class _CreateAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("CreateAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "appProfileId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.CreateAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -866,7 +1850,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Call the create app profile method over HTTP. @@ -877,8 +1861,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
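A short sketch of the metadata convention spelled out above; the header names and values here are illustrative:

```python
# Plain metadata keys carry str values; keys ending in "-bin" carry bytes,
# following the gRPC binary-metadata naming convention.
metadata = [
    ("x-goog-request-params", "parent=projects/my-project/instances/my-instance"),
    ("x-debug-trace-bin", b"\x0a\x02\x08\x01"),
]
# Passed through to any RPC on the client, e.g.:
# client.create_app_profile(request=request, metadata=metadata)
```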
Returns: ~.instance.AppProfile: @@ -888,50 +1874,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", - "body": "app_profile", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_http_options() + ) + request, metadata = self._interceptor.pre_create_app_profile( request, metadata ) - pb_request = bigtable_instance_admin.CreateAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._CreateAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -944,24 +1944,64 @@ def __call__( pb_resp = instance.AppProfile.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_app_profile(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_app_profile_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.AppProfile.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": 
response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _CreateCluster(BigtableInstanceAdminRestStub): + class _CreateCluster( + _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("CreateCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "clusterId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.CreateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -969,7 +2009,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create cluster method over HTTP. @@ -980,8 +2020,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
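The `_LOGGER.debug` blocks threaded through these stubs only emit when `CLIENT_LOGGING_SUPPORTED` is true and the module logger is enabled for DEBUG; a sketch of switching that on from application code, assuming the logger name follows this module's import path:

```python
import logging

logging.basicConfig()
# Activates the httpRequest/httpResponse debug records added in this change.
logging.getLogger(
    "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.rest"
).setLevel(logging.DEBUG)
```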
Returns: ~.operations_pb2.Operation: @@ -991,48 +2033,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/clusters", - "body": "cluster", - }, - ] - request, metadata = self._interceptor.pre_create_cluster(request, metadata) - pb_request = bigtable_instance_admin.CreateClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_create_cluster(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._CreateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1043,22 +2097,64 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": 
dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _CreateInstance(BigtableInstanceAdminRestStub): + class _CreateInstance( + _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("CreateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.CreateInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1066,7 +2162,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create instance method over HTTP. @@ -1077,8 +2173,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1088,48 +2186,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*}/instances", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_create_instance(request, metadata) - pb_request = bigtable_instance_admin.CreateInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_create_instance(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._CreateInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1140,81 +2250,308 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": 
dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _DeleteAppProfile(BigtableInstanceAdminRestStub): + class _CreateLogicalView( + _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("DeleteAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "ignoreWarnings": False, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - + return hash("BigtableInstanceAdminRestTransport.CreateLogicalView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + def __call__( self, - request: bigtable_instance_admin.DeleteAppProfileRequest, + request: bigtable_instance_admin.CreateLogicalViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ): - r"""Call the delete app profile method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create logical view method over HTTP. Args: - request (~.bigtable_instance_admin.DeleteAppProfileRequest): + request (~.bigtable_instance_admin.CreateLogicalViewRequest): The request object. Request message for - BigtableInstanceAdmin.DeleteAppProfile. + BigtableInstanceAdmin.CreateLogicalView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
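Since several of these stubs return a raw `operations_pb2.Operation` rather than a wrapped future, a caller working at this layer would poll it through the transport's `operations_client` property; a minimal sketch, with the helper name and poll interval as assumptions:

```python
import time

from google.longrunning import operations_pb2


def wait_for_operation(
    transport, op: operations_pb2.Operation, poll_seconds: float = 2.0
) -> operations_pb2.Operation:
    # Re-fetch the operation by name until the server marks it done.
    while not op.done:
        time.sleep(poll_seconds)
        op = transport.operations_client.get_operation(name=op.name)
    return op
```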
+ """ - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", - }, - ] - request, metadata = self._interceptor.pre_delete_app_profile( + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_logical_view( request, metadata ) - pb_request = bigtable_instance_admin.DeleteAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_transcoded_request( + http_options, request + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_request_body_json( + transcoded_request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateLogicalView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateLogicalView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request + response = ( + BigtableInstanceAdminRestTransport._CreateLogicalView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_logical_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_logical_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_logical_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateLogicalView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateMaterializedView( + _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.CreateMaterializedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] headers = dict(metadata) headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.CreateMaterializedViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create materialized view method over HTTP. + + Args: + request (~.bigtable_instance_admin.CreateMaterializedViewRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateMaterializedView. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
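Every `__call__` above converts HTTP 4xx/5xx responses into typed exceptions via `core_exceptions.from_http_response`, as in the branch just shown; a sketch of what that surfaces to calling code (the wrapper and RPC choice are illustrative):

```python
from google.api_core import exceptions as core_exceptions


def create_view_safely(client, request):
    # Illustrative wrapper; client is assumed to be a BigtableInstanceAdminClient.
    try:
        return client.create_materialized_view(request=request)
    except core_exceptions.GoogleAPICallError as exc:
        # from_http_response maps statuses to subclasses such as
        # NotFound (404), PermissionDenied (403), or Conflict (409).
        print(f"CreateMaterializedView failed: {exc.code} {exc.message}")
        raise
```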
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_materialized_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateMaterializedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateMaterializedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._CreateMaterializedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1222,94 +2559,286 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteCluster(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("DeleteCluster") + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + resp = self._interceptor.post_create_materialized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_materialized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_materialized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateMaterializedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + class _DeleteAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.DeleteAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + 
transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, - request: bigtable_instance_admin.DeleteClusterRequest, + request: bigtable_instance_admin.DeleteAppProfileRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): - r"""Call the delete cluster method over HTTP. + r"""Call the delete app profile method over HTTP. Args: - request (~.bigtable_instance_admin.DeleteClusterRequest): + request (~.bigtable_instance_admin.DeleteAppProfileRequest): The request object. Request message for - BigtableInstanceAdmin.DeleteCluster. + BigtableInstanceAdmin.DeleteAppProfile. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - }, - ] - request, metadata = self._interceptor.pre_delete_cluster(request, metadata) - pb_request = bigtable_instance_admin.DeleteClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_delete_app_profile( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteAppProfile", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, + ) # Send the request + response = ( + BigtableInstanceAdminRestTransport._DeleteAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteCluster( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.DeleteCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] headers = dict(metadata) headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), ) + return response + + def __call__( + self, + request: bigtable_instance_admin.DeleteClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_cluster(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._DeleteCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteInstance(BigtableInstanceAdminRestStub): + class _DeleteInstance( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("DeleteInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.DeleteInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1317,7 +2846,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete instance method over HTTP. @@ -1328,63 +2857,318 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*}", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_http_options() + ) + request, metadata = self._interceptor.pre_delete_instance(request, metadata) - pb_request = bigtable_instance_admin.DeleteInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._DeleteInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + class _DeleteLogicalView( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.DeleteLogicalView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): uri = transcoded_request["uri"] method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.DeleteLogicalViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete logical view method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteLogicalViewRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteLogicalView. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_logical_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteLogicalView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteLogicalView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request + response = ( + BigtableInstanceAdminRestTransport._DeleteLogicalView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteMaterializedView( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.DeleteMaterializedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] headers = dict(metadata) headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), ) + return response + + def __call__( + self, + request: bigtable_instance_admin.DeleteMaterializedViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete materialized view method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteMaterializedViewRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteMaterializedView. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_materialized_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteMaterializedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteMaterializedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._DeleteMaterializedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _GetAppProfile(BigtableInstanceAdminRestStub): + class _GetAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("GetAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.GetAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1392,7 +3176,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Call the get app profile method over HTTP. @@ -1403,8 +3187,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.instance.AppProfile: @@ -1414,39 +3200,55 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", - }, - ] - request, metadata = self._interceptor.pre_get_app_profile(request, metadata) - pb_request = bigtable_instance_admin.GetAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_get_app_profile(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._GetAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1459,22 +3261,63 @@ def __call__( pb_resp = instance.AppProfile.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_app_profile(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_app_profile_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.AppProfile.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + 
_LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _GetCluster(BigtableInstanceAdminRestStub): + class _GetCluster( + _BaseBigtableInstanceAdminRestTransport._BaseGetCluster, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("GetCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.GetCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1482,7 +3325,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Call the get cluster method over HTTP. @@ -1493,8 +3336,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.instance.Cluster: @@ -1505,39 +3350,55 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - }, - ] - request, metadata = self._interceptor.pre_get_cluster(request, metadata) - pb_request = bigtable_instance_admin.GetClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_get_cluster(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._GetCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1550,22 +3411,64 @@ def __call__( pb_resp = instance.Cluster.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Cluster.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class 
_GetIamPolicy(BigtableInstanceAdminRestStub): + class _GetIamPolicy( + _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("GetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.GetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1573,7 +3476,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the get iam policy method over HTTP. @@ -1583,8 +3486,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -1666,48 +3571,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:getIamPolicy", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1720,22 +3637,63 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for 
google.bigtable.admin_v2.BigtableInstanceAdminClient.get_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _GetInstance(BigtableInstanceAdminRestStub): + class _GetInstance( + _BaseBigtableInstanceAdminRestTransport._BaseGetInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("GetInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.GetInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1743,7 +3701,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Call the get instance method over HTTP. @@ -1754,8 +3712,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.instance.Instance: @@ -1768,39 +3728,357 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*}", - }, - ] - request, metadata = self._interceptor.pre_get_instance(request, metadata) - pb_request = bigtable_instance_admin.GetInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_instance(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._GetInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
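The post_*_with_metadata hooks invoked just below are the new interceptor extension points: they receive the decoded response together with the response headers rendered as metadata pairs, and may rewrite either. An illustrative override, not part of this diff:

class MetadataEcho(BigtableInstanceAdminRestInterceptor):
    def post_get_instance_with_metadata(self, response, metadata):
        # metadata arrives as a list of (key, str(value)) pairs
        print(f"GetInstance carried {len(metadata)} metadata pairs")
        return response, metadata

Such an interceptor would be handed to the transport via its interceptor parameter when the transport is constructed.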
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.Instance() + pb_resp = instance.Instance.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Instance.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetLogicalView( + _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.GetLogicalView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.GetLogicalViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.LogicalView: + r"""Call the get logical view method over HTTP. + + Args: + request (~.bigtable_instance_admin.GetLogicalViewRequest): + The request object. Request message for + BigtableInstanceAdmin.GetLogicalView. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.instance.LogicalView: + A SQL logical view object that can be + referenced in SQL queries. 
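A sketch of consuming the LogicalView returned here; `name` and `query` are fields on the admin_v2 LogicalView message, and the resource IDs are placeholders:

view = admin_client.get_logical_view(
    request={"name": "projects/my-project/instances/my-instance/logicalViews/my-view"}
)
print(view.name, view.query)  # query holds the view's SQL text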
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_logical_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetLogicalView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetLogicalView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._GetLogicalView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.LogicalView() + pb_resp = instance.LogicalView.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_logical_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_logical_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.LogicalView.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_logical_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetLogicalView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetMaterializedView( + _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.GetMaterializedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.GetMaterializedViewRequest, + *, + retry: OptionalRetry = 
gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.MaterializedView: + r"""Call the get materialized view method over HTTP. + + Args: + request (~.bigtable_instance_admin.GetMaterializedViewRequest): + The request object. Request message for + BigtableInstanceAdmin.GetMaterializedView. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.instance.MaterializedView: + A materialized view object that can + be referenced in SQL queries. + + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_get_materialized_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetMaterializedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetMaterializedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = ( + BigtableInstanceAdminRestTransport._GetMaterializedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1809,26 +4087,67 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = instance.Instance() - pb_resp = instance.Instance.pb(resp) + resp = instance.MaterializedView() + pb_resp = instance.MaterializedView.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_instance(resp) + + resp = 
self._interceptor.post_get_materialized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_materialized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.MaterializedView.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_materialized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetMaterializedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ListAppProfiles(BigtableInstanceAdminRestStub): + class _ListAppProfiles( + _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("ListAppProfiles") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.ListAppProfiles") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1836,7 +4155,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListAppProfilesResponse: r"""Call the list app profiles method over HTTP. @@ -1847,8 +4166,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListAppProfilesResponse: @@ -1857,41 +4178,59 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_http_options() + ) + request, metadata = self._interceptor.pre_list_app_profiles( request, metadata ) - pb_request = bigtable_instance_admin.ListAppProfilesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListAppProfiles", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListAppProfiles", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = ( + BigtableInstanceAdminRestTransport._ListAppProfiles._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1904,22 +4243,67 @@ def __call__( pb_resp = bigtable_instance_admin.ListAppProfilesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_app_profiles(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_app_profiles_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListAppProfilesResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_app_profiles", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListAppProfiles", + 
"metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ListClusters(BigtableInstanceAdminRestStub): + class _ListClusters( + _BaseBigtableInstanceAdminRestTransport._BaseListClusters, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("ListClusters") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.ListClusters") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1927,7 +4311,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Call the list clusters method over HTTP. @@ -1938,8 +4322,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListClustersResponse: @@ -1948,39 +4334,55 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/clusters", - }, - ] - request, metadata = self._interceptor.pre_list_clusters(request, metadata) - pb_request = bigtable_instance_admin.ListClustersRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_list_clusters(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListClusters", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListClusters", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._ListClusters._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1993,22 +4395,65 @@ def __call__( pb_resp = bigtable_instance_admin.ListClustersResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_clusters(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_clusters_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListClustersResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_clusters", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListClusters", + 
"metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ListHotTablets(BigtableInstanceAdminRestStub): + class _ListHotTablets( + _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("ListHotTablets") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.ListHotTablets") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2016,7 +4461,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListHotTabletsResponse: r"""Call the list hot tablets method over HTTP. @@ -2027,8 +4472,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListHotTabletsResponse: @@ -2037,41 +4484,57 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_http_options() + ) + request, metadata = self._interceptor.pre_list_hot_tablets( request, metadata ) - pb_request = bigtable_instance_admin.ListHotTabletsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListHotTablets", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListHotTablets", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._ListHotTablets._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2084,22 +4547,65 @@ def __call__( pb_resp = bigtable_instance_admin.ListHotTabletsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_hot_tablets(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_hot_tablets_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListHotTabletsResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_hot_tablets", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListHotTablets", + "metadata": 
http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ListInstances(BigtableInstanceAdminRestStub): + class _ListInstances( + _BaseBigtableInstanceAdminRestTransport._BaseListInstances, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("ListInstances") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.ListInstances") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2107,7 +4613,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Call the list instances method over HTTP. @@ -2118,8 +4624,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListInstancesResponse: @@ -2128,40 +4636,366 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*}/instances", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_http_options() + ) + request, metadata = self._interceptor.pre_list_instances(request, metadata) - pb_request = bigtable_instance_admin.ListInstancesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListInstances", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListInstances", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._ListInstances._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListInstancesResponse() + pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_instances(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_instances_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListInstancesResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_instances", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListInstances", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + class _ListLogicalViews( + _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.ListLogicalViews") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): uri = transcoded_request["uri"] method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.ListLogicalViewsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_instance_admin.ListLogicalViewsResponse: + r"""Call the list logical views method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListLogicalViewsRequest): + The request object. Request message for + BigtableInstanceAdmin.ListLogicalViews. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.bigtable_instance_admin.ListLogicalViewsResponse: + Response message for + BigtableInstanceAdmin.ListLogicalViews. 
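Every `__call__` in this transport now accepts `metadata: Sequence[Tuple[str, Union[str, bytes]]]`, following the gRPC convention that keys ending in `-bin` carry binary values while all other keys carry strings. A minimal sketch of a conforming metadata list (the key names here are hypothetical):

```python
# Hypothetical metadata for a transport call: plain keys take str values,
# "-bin"-suffixed keys take bytes values, per the docstrings above.
metadata = [
    ("x-goog-request-params", "parent=projects/p/instances/i"),  # str value
    ("x-custom-context-bin", b"\x0a\x02\x08\x01"),               # bytes value
]
```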
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_logical_views( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListLogicalViews", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListLogicalViews", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request + response = ( + BigtableInstanceAdminRestTransport._ListLogicalViews._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListLogicalViewsResponse() + pb_resp = bigtable_instance_admin.ListLogicalViewsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_logical_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_logical_views_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListLogicalViewsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_logical_views", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListLogicalViews", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListMaterializedViews( + _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.ListMaterializedViews") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] headers = dict(metadata) headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), ) + return response + + def __call__( + self, + request: bigtable_instance_admin.ListMaterializedViewsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_instance_admin.ListMaterializedViewsResponse: + r"""Call the list materialized views method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListMaterializedViewsRequest): + The request object. Request message for + BigtableInstanceAdmin.ListMaterializedViews. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.bigtable_instance_admin.ListMaterializedViewsResponse: + Response message for + BigtableInstanceAdmin.ListMaterializedViews. 
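The `_get_transcoded_request` helpers these stubs call into wrap `google.api_core.path_template.transcode`, which matches the request against the configured HTTP rules and splits it into verb, URI, body, and leftover query parameters. A small sketch of that mechanism in isolation, using one of the rules shown above (field values are illustrative):

```python
from google.api_core import path_template

# One HTTP rule, in the shape the _get_http_options() helpers return.
http_options = [
    {"method": "get", "uri": "/v2/{parent=projects/*/instances/*}/appProfiles"}
]

# Fields named in the URI template are substituted into the path; everything
# else is left over for the query string.
transcoded = path_template.transcode(
    http_options, parent="projects/my-project/instances/my-instance", page_size=10
)
print(transcoded["method"])        # get
print(transcoded["uri"])           # /v2/projects/my-project/instances/my-instance/appProfiles
print(transcoded["query_params"])  # {'page_size': 10}
```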
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_materialized_views( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListMaterializedViews", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListMaterializedViews", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableInstanceAdminRestTransport._ListMaterializedViews._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. @@ -2169,28 +5003,72 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = bigtable_instance_admin.ListInstancesResponse() - pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) + resp = bigtable_instance_admin.ListMaterializedViewsResponse() + pb_resp = bigtable_instance_admin.ListMaterializedViewsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_instances(resp) + + resp = self._interceptor.post_list_materialized_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_materialized_views_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListMaterializedViewsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_materialized_views", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListMaterializedViews", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _PartialUpdateCluster(BigtableInstanceAdminRestStub): + class _PartialUpdateCluster( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("PartialUpdateCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in 
message_dict - } + return hash("BigtableInstanceAdminRestTransport.PartialUpdateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2198,7 +5076,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the partial update cluster method over HTTP. @@ -2209,8 +5087,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -2220,50 +5100,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{cluster.name=projects/*/instances/*/clusters/*}", - "body": "cluster", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_http_options() + ) + request, metadata = self._interceptor.pre_partial_update_cluster( request, metadata ) - pb_request = bigtable_instance_admin.PartialUpdateClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.bigtable.admin_v2.BigtableInstanceAdminClient.PartialUpdateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._PartialUpdateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2274,24 +5168,64 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_partial_update_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.partial_update_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _PartialUpdateInstance(BigtableInstanceAdminRestStub): + class _PartialUpdateInstance( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("PartialUpdateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.PartialUpdateInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2299,7 +5233,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the partial update instance method over HTTP. @@ -2310,8 +5244,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -2321,52 +5257,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{instance.name=projects/*/instances/*}", - "body": "instance", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_http_options() + ) + request, metadata = self._interceptor.pre_partial_update_instance( request, metadata ) - pb_request = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( - request + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_transcoded_request( + http_options, request ) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.PartialUpdateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._PartialUpdateInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2377,22 +5325,64 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_instance(resp) + response_metadata = [(k, 
str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_partial_update_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.partial_update_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _SetIamPolicy(BigtableInstanceAdminRestStub): + class _SetIamPolicy( + _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("SetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.SetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2400,7 +5390,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the set iam policy method over HTTP. @@ -2410,8 +5400,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -2493,48 +5485,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:setIamPolicy", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.SetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2547,22 +5551,64 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_set_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for 
google.bigtable.admin_v2.BigtableInstanceAdminClient.set_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "SetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _TestIamPermissions(BigtableInstanceAdminRestStub): + class _TestIamPermissions( + _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("TestIamPermissions") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.TestIamPermissions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2570,7 +5616,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Call the test iam permissions method over HTTP. @@ -2580,58 +5626,388 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.iam_policy_pb2.TestIamPermissionsResponse: Response message for ``TestIamPermissions`` method. 
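Alongside the original `post_<rpc>` hooks, each stub now also invokes a `post_<rpc>_with_metadata` hook that receives the HTTP response headers as gRPC-style `(key, value)` metadata. A minimal sketch of a custom interceptor built on that hook; the import path assumes the standard generated-module layout for this package:

```python
import logging

from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.rest import (
    BigtableInstanceAdminRestInterceptor,
)


class HeaderLoggingInterceptor(BigtableInstanceAdminRestInterceptor):
    """Observes response headers surfaced by the *_with_metadata hooks."""

    def post_list_instances_with_metadata(self, response, response_metadata):
        # response_metadata is the list of (header, value) pairs the transport
        # builds from response.headers; return both so it can keep using them.
        for key, value in response_metadata:
            logging.debug("ListInstances response header %s=%s", key, value)
        return response, response_metadata
```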
""" - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:testIamPermissions", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_http_options() + ) + request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.TestIamPermissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableInstanceAdminRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = iam_policy_pb2.TestIamPermissionsResponse() + pb_resp = resp - # Jsonify the request body + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + resp = self._interceptor.post_test_iam_permissions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_test_iam_permissions_with_metadata( + resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.test_iam_permissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "TestIamPermissions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UpdateAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.UpdateAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): uri = transcoded_request["uri"] method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.UpdateAppProfileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.UpdateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
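The stubs that return long-running operations (`PartialUpdateCluster`, `UpdateAppProfile`, and so on) all parse the HTTP body straight into a raw `operations_pb2.Operation` via `json_format.Parse`, tolerating unknown fields. The same mechanism in isolation, on a canned payload:

```python
from google.longrunning import operations_pb2
from google.protobuf import json_format

# Parse a JSON operation body into the proto, ignoring unknown fields,
# as the generated stubs do with response.content.
resp = operations_pb2.Operation()
json_format.Parse(
    '{"name": "operations/12345", "done": false}',
    resp,
    ignore_unknown_fields=True,
)
print(resp.name, resp.done)  # operations/12345 False
```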
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_http_options() + ) + + request, metadata = self._interceptor.pre_update_app_profile( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_request_body_json( + transcoded_request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableInstanceAdminRestTransport._UpdateAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_update_app_profile(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_app_profile_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UpdateCluster( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.UpdateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: instance.Cluster, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update cluster method over HTTP. + + Args: + request (~.instance.Cluster): + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
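Every stub raises through `core_exceptions.from_http_response` when the status is 400 or above. The status-to-class mapping it relies on is easiest to see through the sibling helper `from_http_status`; a quick sketch:

```python
from google.api_core import exceptions as core_exceptions

# from_http_response builds on the same HTTP-status -> exception-class
# mapping that from_http_status exposes directly.
exc = core_exceptions.from_http_status(404, "instance not found")
assert isinstance(exc, core_exceptions.NotFound)

exc = core_exceptions.from_http_status(403, "caller lacks permission")
assert isinstance(exc, core_exceptions.PermissionDenied)
```

Callers can therefore catch `core_exceptions.GoogleAPICallError`, or a specific subclass such as `NotFound`, around any of these RPCs.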
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_update_cluster(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._UpdateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2640,101 +6016,158 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = iam_policy_pb2.TestIamPermissionsResponse() - pb_resp = resp + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_test_iam_permissions(resp) + resp = self._interceptor.post_update_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _UpdateAppProfile(BigtableInstanceAdminRestStub): + class _UpdateInstance( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("UpdateAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod 
- def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.UpdateInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, - request: bigtable_instance_admin.UpdateAppProfileRequest, + request: instance.Instance, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operations_pb2.Operation: - r"""Call the update app profile method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.Instance: + r"""Call the update instance method over HTTP. Args: - request (~.bigtable_instance_admin.UpdateAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.UpdateAppProfile. + request (~.instance.Instance): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. + ~.instance.Instance: + A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}", - "body": "app_profile", - }, - ] - request, metadata = self._interceptor.pre_update_app_profile( - request, metadata + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_http_options() ) - pb_request = bigtable_instance_admin.UpdateAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - # Jsonify the request body + request, metadata = self._interceptor.pre_update_instance(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._UpdateInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2743,36 +6176,90 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_app_profile(resp) + resp = instance.Instance() + pb_resp = instance.Instance.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_update_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Instance.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _UpdateCluster(BigtableInstanceAdminRestStub): + class _UpdateLogicalView( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("UpdateCluster") + return hash("BigtableInstanceAdminRestTransport.UpdateLogicalView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, - request: instance.Cluster, + request: bigtable_instance_admin.UpdateLogicalViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the update cluster method over HTTP. + r"""Call the update logical view method over HTTP. Args: - request (~.instance.Cluster): - The request object. A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. + request (~.bigtable_instance_admin.UpdateLogicalViewRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateLogicalView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2782,47 +6269,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "put", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_update_cluster(request, metadata) - pb_request = instance.Cluster.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_update_logical_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_query_params_json( + transcoded_request ) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateLogicalView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateLogicalView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._UpdateLogicalView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2833,100 +6337,151 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_cluster(resp) + + resp = self._interceptor.post_update_logical_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_logical_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": 
dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_logical_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateLogicalView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _UpdateInstance(BigtableInstanceAdminRestStub): + class _UpdateMaterializedView( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("UpdateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.UpdateMaterializedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, - request: instance.Instance, + request: bigtable_instance_admin.UpdateMaterializedViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.Instance: - r"""Call the update instance method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update materialized view method over HTTP. Args: - request (~.instance.Instance): - The request object. A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. + request (~.bigtable_instance_admin.UpdateMaterializedViewRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateMaterializedView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - ~.instance.Instance: - A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "put", - "uri": "/v2/{name=projects/*/instances/*}", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_update_instance(request, metadata) - pb_request = instance.Instance.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_update_materialized_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateMaterializedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateMaterializedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._UpdateMaterializedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2935,11 +6490,35 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = instance.Instance() - pb_resp = instance.Instance.pb(resp) + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_instance(resp) + resp = self._interceptor.post_update_materialized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_materialized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_materialized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateMaterializedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property @@ -2972,6 +6551,27 @@ def create_instance( # In C++ this would require a dynamic_cast return self._CreateInstance(self._session, self._host, self._interceptor) # type: ignore + @property + def create_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateLogicalViewRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateLogicalView(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateMaterializedViewRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateMaterializedView(self._session, self._host, self._interceptor) # type: ignore + @property def delete_app_profile( self, @@ -2996,6 +6596,24 @@ def delete_instance( # In C++ this would require a dynamic_cast return self._DeleteInstance(self._session, self._host, self._interceptor) # type: ignore + @property + def delete_logical_view( + self, + ) -> Callable[[bigtable_instance_admin.DeleteLogicalViewRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteLogicalView(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteMaterializedViewRequest], empty_pb2.Empty + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteMaterializedView(self._session, self._host, self._interceptor) # type: ignore + @property def get_app_profile( self, @@ -3028,6 +6646,26 @@ def get_instance( # In C++ this would require a dynamic_cast return self._GetInstance(self._session, self._host, self._interceptor) # type: ignore + @property + def get_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetLogicalViewRequest], instance.LogicalView + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetLogicalView(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetMaterializedViewRequest], instance.MaterializedView + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetMaterializedView(self._session, self._host, self._interceptor) # type: ignore + @property def list_app_profiles( self, @@ -3072,6 +6710,28 @@ def list_instances( # In C++ this would require a dynamic_cast return self._ListInstances(self._session, self._host, self._interceptor) # type: ignore + @property + def list_logical_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListLogicalViewsRequest], + bigtable_instance_admin.ListLogicalViewsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListLogicalViews(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_materialized_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListMaterializedViewsRequest], + bigtable_instance_admin.ListMaterializedViewsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListMaterializedViews(self._session, self._host, self._interceptor) # type: ignore + @property def partial_update_cluster( self, @@ -3133,6 +6793,27 @@ def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: # In C++ this would require a dynamic_cast return self._UpdateInstance(self._session, self._host, self._interceptor) # type: ignore + @property + def update_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateLogicalViewRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateLogicalView(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateMaterializedViewRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateMaterializedView(self._session, self._host, self._interceptor) # type: ignore + @property def kind(self) -> str: return "rest" diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py new file mode 100644 index 000000000..9855756b8 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py @@ -0,0 +1,1746 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
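The new `rest_base.py` below factors each RPC's HTTP recipe (URL template, verb, body field, and required-field query-param defaults) into a `_Base<Method>` inner class of `_BaseBigtableInstanceAdminRestTransport`, exposed as `@staticmethod`s so that the sync transport above, and any future async transport, can share them. A rough sketch of composing those helpers by hand, using the `_BaseGetInstance` class defined further down this file (the resource name is a hypothetical example):

    # Illustrative only: exercising the _Base helpers without a transport instance.
    from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin

    helper = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance
    request = bigtable_instance_admin.GetInstanceRequest(
        name="projects/my-project/instances/my-instance"  # hypothetical resource
    )

    http_options = helper._get_http_options()  # [{"method": "get", "uri": "/v2/{name=...}"}]
    transcoded = helper._get_transcoded_request(http_options, request)
    params = helper._get_query_params_json(transcoded)

    # transcoded["method"] == "get"
    # transcoded["uri"] == "/v2/projects/my-project/instances/my-instance"
    # params == {"$alt": "json;enum-encoding=int"}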
+# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + + +class _BaseBigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): + """Base REST backend transport for BigtableInstanceAdmin. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + Args: + host (Optional[str]): + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). + credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+        """
+        # Run the base constructor
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+
+    class _BaseCreateAppProfile:
+        def __hash__(self):  # pragma: NO COVER
+            return NotImplementedError("__hash__ must be implemented.")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+            "appProfileId": "",
+        }
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        @staticmethod
+        def _get_http_options():
+            http_options: List[Dict[str, str]] = [
+                {
+                    "method": "post",
+                    "uri": "/v2/{parent=projects/*/instances/*}/appProfiles",
+                    "body": "app_profile",
+                },
+            ]
+            return http_options
+
+        @staticmethod
+        def _get_transcoded_request(http_options, request):
+            pb_request = bigtable_instance_admin.CreateAppProfileRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+            return transcoded_request
+
+        @staticmethod
+        def _get_request_body_json(transcoded_request):
+            # Jsonify the request body
+
+            body = json_format.MessageToJson(
+                transcoded_request["body"], use_integers_for_enums=True
+            )
+            return body
+
+        @staticmethod
+        def _get_query_params_json(transcoded_request):
+            query_params = json.loads(
+                json_format.MessageToJson(
+                    transcoded_request["query_params"],
+                    use_integers_for_enums=True,
+                )
+            )
+            query_params.update(
+                _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_unset_required_fields(
+                    query_params
+                )
+            )
+
+            query_params["$alt"] = "json;enum-encoding=int"
+            return query_params
+
+    class _BaseCreateCluster:
+        def __hash__(self):  # pragma: NO COVER
+            return NotImplementedError("__hash__ must be implemented.")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+            "clusterId": "",
+        }
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        @staticmethod
+        def _get_http_options():
+            http_options: List[Dict[str, str]] = [
+                {
+                    "method": "post",
+                    "uri": "/v2/{parent=projects/*/instances/*}/clusters",
+                    "body": "cluster",
+                },
+            ]
+            return http_options
+
+        @staticmethod
+        def _get_transcoded_request(http_options, request):
+            pb_request = bigtable_instance_admin.CreateClusterRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+            return transcoded_request
+
+        @staticmethod
+        def _get_request_body_json(transcoded_request):
+            # Jsonify the request body
+
+            body = json_format.MessageToJson(
+                transcoded_request["body"], use_integers_for_enums=True
+            )
+            return body
+
+        @staticmethod
+        def _get_query_params_json(transcoded_request):
+            query_params = json.loads(
+                json_format.MessageToJson(
+                    transcoded_request["query_params"],
+                    use_integers_for_enums=True,
+                )
+            )
+            query_params.update(
+                _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_unset_required_fields(
+                    query_params
+                )
+            )
+
+            query_params["$alt"] = "json;enum-encoding=int"
+            return query_params
+
+    class _BaseCreateInstance:
+        def __hash__(self):  # pragma: NO
COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*}/instances", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.CreateInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateLogicalView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "logicalViewId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/logicalViews", + "body": "logical_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.CreateLogicalViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateMaterializedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "materializedViewId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/materializedViews", + "body": "materialized_view", + }, + ] + return http_options + + @staticmethod + def 
_get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.CreateMaterializedViewRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteAppProfile: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "ignoreWarnings": False, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} 
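Each `_Base*` class here carries a `__REQUIRED_FIELDS_DEFAULT_VALUES` map and the same `_get_unset_required_fields` classmethod. The reason: proto3 JSON serialization omits fields set to their default value, but the REST API still requires certain query parameters to be present, so `_get_query_params_json` backfills any that transcoding dropped. A standalone paraphrase of that behavior, using the `{"ignoreWarnings": False}` default from `_BaseDeleteAppProfile` above (values assumed for illustration):

    # Illustrative only: how omitted-but-required query params get backfilled.
    REQUIRED_DEFAULTS = {"ignoreWarnings": False}

    def get_unset_required_fields(message_dict):
        # Keep only the required keys that serialization left out.
        return {k: v for k, v in REQUIRED_DEFAULTS.items() if k not in message_dict}

    query_params = {}  # proto3 dropped ignoreWarnings=False as a default value
    query_params.update(get_unset_required_fields(query_params))
    assert query_params == {"ignoreWarnings": False}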
+ + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteLogicalView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/logicalViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteLogicalViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteMaterializedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/materializedViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteMaterializedViewRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class 
_BaseGetAppProfile: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/clusters/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:getIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request 
body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetLogicalView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/logicalViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetLogicalViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetMaterializedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/materializedViews/*}", + }, + ] + return http_options + + 
@staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetMaterializedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListAppProfiles: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListAppProfilesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListClusters: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/clusters", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListClustersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListHotTablets: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: 
List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListHotTabletsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListInstances: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*}/instances", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListInstancesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListLogicalViews: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/logicalViews", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListLogicalViewsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListMaterializedViews: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v 
in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/materializedViews", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListMaterializedViewsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BasePartialUpdateCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{cluster.name=projects/*/instances/*/clusters/*}", + "body": "cluster", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.PartialUpdateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BasePartialUpdateInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{instance.name=projects/*/instances/*}", + "body": "instance", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + 
return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseSetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:setIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseTestIamPermissions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:testIamPermissions", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + 
) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateAppProfile: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}", + "body": "app_profile", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.UpdateAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "put", + "uri": "/v2/{name=projects/*/instances/*/clusters/*}", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = instance.Cluster.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "put", + "uri": "/v2/{name=projects/*/instances/*}", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = instance.Instance.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return 
transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateLogicalView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{logical_view.name=projects/*/instances/*/logicalViews/*}", + "body": "logical_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.UpdateLogicalViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateMaterializedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{materialized_view.name=projects/*/instances/*/materializedViews/*}", + "body": "materialized_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.UpdateMaterializedViewRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + 
_BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + +__all__ = ("_BaseBigtableInstanceAdminRestTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index 544649e90..c5e8544d6 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .client import BigtableTableAdminClient -from .async_client import BigtableTableAdminAsyncClient +from .client import BaseBigtableTableAdminClient +from .async_client import BaseBigtableTableAdminAsyncClient __all__ = ( - "BigtableTableAdminClient", - "BigtableTableAdminAsyncClient", + "BaseBigtableTableAdminClient", + "BaseBigtableTableAdminAsyncClient", ) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index d5edeb91d..7f772c87c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
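Before moving on to the async client changes, a note on the `_Base*` REST transport helpers that end above: each one follows the same four-step pipeline. It fetches the HTTP bindings, transcodes the request onto a URI (plus a body for mutating verbs), serializes the remaining fields as query parameters, merges in defaults for any unset required fields, and pins `$alt` to JSON with integer enum encoding. A hand-driven sketch of that flow for `ListInstances`, assuming the generated module path shown in the import:

```python
# Illustrative sketch only: drives the generated _BaseListInstances helpers by
# hand to show the request pipeline. The import path is an assumption based on
# the standard GAPIC layout.
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.rest_base import (
    _BaseBigtableInstanceAdminRestTransport,
)
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin

base = _BaseBigtableInstanceAdminRestTransport._BaseListInstances
request = bigtable_instance_admin.ListInstancesRequest(parent="projects/my-project")

# Step 1: the declared bindings, e.g. GET /v2/{parent=projects/*}/instances
http_options = base._get_http_options()
# Step 2: transcode the proto onto a concrete URI.
transcoded = base._get_transcoded_request(http_options, request)
# transcoded["method"] == "get"
# transcoded["uri"]    == "/v2/projects/my-project/instances"
# Steps 3-4: remaining fields become query params; unset required fields (none
# for ListInstances) are merged in, and "$alt" is always "json;enum-encoding=int".
query_params = base._get_query_params_json(transcoded)
```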
# +import logging as std_logging from collections import OrderedDict -import functools import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -33,14 +34,16 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf + try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -48,16 +51,26 @@ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.cloud.bigtable_admin_v2.types import types from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport -from .client import BigtableTableAdminClient +from .client import BaseBigtableTableAdminClient + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False +_LOGGER = std_logging.getLogger(__name__) -class BigtableTableAdminAsyncClient: + +class BaseBigtableTableAdminAsyncClient: """Service for creating, configuring, and deleting Cloud Bigtable tables. @@ -65,50 +78,66 @@ class BigtableTableAdminAsyncClient: within the tables. """ - _client: BigtableTableAdminClient + _client: BaseBigtableTableAdminClient - DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
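The `_DEFAULT_ENDPOINT_TEMPLATE` referenced in the assignments that follow is the universe-domain-aware replacement for the deprecated `DEFAULT_ENDPOINT` constant; conceptually it is just a format string resolved against the configured universe. A rough illustration, where the template value is an assumption inferred from the service's public endpoint:

```python
# Assumed values; the generated constants follow this pattern.
_DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}"
_DEFAULT_UNIVERSE = "googleapis.com"

# The default universe resolves to the classic endpoint...
assert (
    _DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=_DEFAULT_UNIVERSE)
    == "bigtableadmin.googleapis.com"
)
# ...while a custom universe swaps only the domain suffix.
assert (
    _DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN="example.dev")
    == "bigtableadmin.example.dev"
)
```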
+ DEFAULT_ENDPOINT = BaseBigtableTableAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE - backup_path = staticmethod(BigtableTableAdminClient.backup_path) - parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) - cluster_path = staticmethod(BigtableTableAdminClient.cluster_path) - parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path) + authorized_view_path = staticmethod( + BaseBigtableTableAdminClient.authorized_view_path + ) + parse_authorized_view_path = staticmethod( + BaseBigtableTableAdminClient.parse_authorized_view_path + ) + backup_path = staticmethod(BaseBigtableTableAdminClient.backup_path) + parse_backup_path = staticmethod(BaseBigtableTableAdminClient.parse_backup_path) + cluster_path = staticmethod(BaseBigtableTableAdminClient.cluster_path) + parse_cluster_path = staticmethod(BaseBigtableTableAdminClient.parse_cluster_path) crypto_key_version_path = staticmethod( - BigtableTableAdminClient.crypto_key_version_path + BaseBigtableTableAdminClient.crypto_key_version_path ) parse_crypto_key_version_path = staticmethod( - BigtableTableAdminClient.parse_crypto_key_version_path + BaseBigtableTableAdminClient.parse_crypto_key_version_path + ) + instance_path = staticmethod(BaseBigtableTableAdminClient.instance_path) + parse_instance_path = staticmethod(BaseBigtableTableAdminClient.parse_instance_path) + schema_bundle_path = staticmethod(BaseBigtableTableAdminClient.schema_bundle_path) + parse_schema_bundle_path = staticmethod( + BaseBigtableTableAdminClient.parse_schema_bundle_path ) - instance_path = staticmethod(BigtableTableAdminClient.instance_path) - parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path) - snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path) - parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path) - table_path = staticmethod(BigtableTableAdminClient.table_path) - parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path) + snapshot_path = staticmethod(BaseBigtableTableAdminClient.snapshot_path) + parse_snapshot_path = staticmethod(BaseBigtableTableAdminClient.parse_snapshot_path) + table_path = staticmethod(BaseBigtableTableAdminClient.table_path) + parse_table_path = staticmethod(BaseBigtableTableAdminClient.parse_table_path) common_billing_account_path = staticmethod( - BigtableTableAdminClient.common_billing_account_path + BaseBigtableTableAdminClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( - BigtableTableAdminClient.parse_common_billing_account_path + BaseBigtableTableAdminClient.parse_common_billing_account_path ) - common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path) + common_folder_path = staticmethod(BaseBigtableTableAdminClient.common_folder_path) parse_common_folder_path = staticmethod( - BigtableTableAdminClient.parse_common_folder_path + BaseBigtableTableAdminClient.parse_common_folder_path ) common_organization_path = staticmethod( - BigtableTableAdminClient.common_organization_path + BaseBigtableTableAdminClient.common_organization_path ) parse_common_organization_path = staticmethod( - BigtableTableAdminClient.parse_common_organization_path + BaseBigtableTableAdminClient.parse_common_organization_path ) - common_project_path = 
staticmethod(BigtableTableAdminClient.common_project_path) + common_project_path = staticmethod(BaseBigtableTableAdminClient.common_project_path) parse_common_project_path = staticmethod( - BigtableTableAdminClient.parse_common_project_path + BaseBigtableTableAdminClient.parse_common_project_path + ) + common_location_path = staticmethod( + BaseBigtableTableAdminClient.common_location_path ) - common_location_path = staticmethod(BigtableTableAdminClient.common_location_path) parse_common_location_path = staticmethod( - BigtableTableAdminClient.parse_common_location_path + BaseBigtableTableAdminClient.parse_common_location_path ) @classmethod @@ -122,9 +151,9 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - BigtableTableAdminAsyncClient: The constructed client. + BaseBigtableTableAdminAsyncClient: The constructed client. """ - return BigtableTableAdminClient.from_service_account_info.__func__(BigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore + return BaseBigtableTableAdminClient.from_service_account_info.__func__(BaseBigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -138,9 +167,9 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - BigtableTableAdminAsyncClient: The constructed client. + BaseBigtableTableAdminAsyncClient: The constructed client. """ - return BigtableTableAdminClient.from_service_account_file.__func__(BigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore + return BaseBigtableTableAdminClient.from_service_account_file.__func__(BaseBigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @@ -178,7 +207,7 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ - return BigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + return BaseBigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore @property def transport(self) -> BigtableTableAdminTransport: @@ -189,20 +218,42 @@ def transport(self) -> BigtableTableAdminTransport: """ return self._client.transport - get_transport_class = functools.partial( - type(BigtableTableAdminClient).get_transport_class, - type(BigtableTableAdminClient), - ) + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = BaseBigtableTableAdminClient.get_transport_class def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio", + transport: Optional[ + Union[ + str, + BigtableTableAdminTransport, + Callable[..., BigtableTableAdminTransport], + ] + ] = "grpc_asyncio", client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable table admin client. 
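The widened `transport` parameter in the new signature above also accepts a factory callable, which is invoked with the same keyword arguments the transport constructor receives. A hedged sketch of that usage; the factory name is illustrative:

```python
# Sketch: supplying a transport factory instead of a string or an instance.
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BaseBigtableTableAdminAsyncClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.grpc_asyncio import (
    BigtableTableAdminGrpcAsyncIOTransport,
)


def transport_factory(**kwargs):
    # kwargs carry the usual transport init arguments (credentials, host, ...);
    # they can be inspected or adjusted here before the transport is built.
    return BigtableTableAdminGrpcAsyncIOTransport(**kwargs)


client = BaseBigtableTableAdminAsyncClient(transport=transport_factory)
```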
+ """Instantiates the base bigtable table admin async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -210,37 +261,76 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.BigtableTableAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTableAdminTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. 
""" - self._client = BigtableTableAdminClient( + self._client = BaseBigtableTableAdminClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BaseBigtableTableAdminAsyncClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "credentialsType": None, + }, + ) + async def create_table( self, request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, @@ -250,12 +340,39 @@ async def create_table( table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + response = await client.create_table(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]]): The request object. Request message for @@ -282,11 +399,13 @@ async def create_table( This corresponds to the ``table`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -297,16 +416,22 @@ async def create_table( """ # Create or coerce a protobuf request object. 
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, table]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, table_id, table] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.CreateTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateTableRequest): + request = bigtable_table_admin.CreateTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -319,11 +444,9 @@ async def create_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_table, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -331,6 +454,9 @@ async def create_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -353,7 +479,7 @@ async def create_table_from_snapshot( source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table @@ -366,6 +492,38 @@ async def create_table_from_snapshot( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_table_from_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Make the request + operation = client.create_table_from_snapshot(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): The request object. 
Request message for @@ -402,11 +560,13 @@ async def create_table_from_snapshot( This corresponds to the ``source_snapshot`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -418,16 +578,22 @@ async def create_table_from_snapshot( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, source_snapshot]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, table_id, source_snapshot] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): + request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -440,11 +606,9 @@ async def create_table_from_snapshot( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_table_from_snapshot, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_table_from_snapshot + ] # Certain fields should be provided within the metadata header; # add these here. @@ -452,6 +616,9 @@ async def create_table_from_snapshot( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -478,10 +645,37 @@ async def list_tables( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListTablesAsyncPager: r"""Lists all tables served from a specified instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_tables(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListTablesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tables(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]]): The request object. Request message for @@ -494,11 +688,13 @@ async def list_tables( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager: @@ -510,16 +706,22 @@ async def list_tables( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.ListTablesRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListTablesRequest): + request = bigtable_table_admin.ListTablesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -528,21 +730,9 @@ async def list_tables( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tables, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_tables + ] # Certain fields should be provided within the metadata header; # add these here. @@ -550,6 +740,9 @@ async def list_tables( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. 
+ self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -564,6 +757,8 @@ async def list_tables( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -577,10 +772,36 @@ async def get_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetTableRequest( + name="name_value", + ) + + # Make the request + response = await client.get_table(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]]): The request object. Request message for @@ -593,11 +814,13 @@ async def get_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -608,16 +831,22 @@ async def get_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.GetTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetTableRequest): + request = bigtable_table_admin.GetTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
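The hunks above and below replace per-call `gapic_v1.method_async.wrap_method(...)` blocks with a lookup into the transport's precomputed `_wrapped_methods` table, so default retry and timeout policy is defined once on the transport rather than rebuilt on every invocation. The inline policy being removed for `list_tables` and `get_table` corresponds to roughly the following, sketched with the new `retry_async` import (`timeout` supersedes the deprecated `deadline` keyword):

```python
# Approximation of the default policy that used to be built inline and now
# ships with the transport's _wrapped_methods registry.
from google.api_core import exceptions as core_exceptions
from google.api_core import retry_async as retries

DEFAULT_LIST_TABLES_RETRY = retries.AsyncRetry(
    initial=1.0,   # first backoff, in seconds
    maximum=60.0,  # backoff ceiling
    multiplier=2,  # exponential growth factor
    predicate=retries.if_exception_type(
        core_exceptions.DeadlineExceeded,
        core_exceptions.ServiceUnavailable,
    ),
    timeout=60.0,  # overall time budget across all attempts
)
```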
@@ -626,21 +855,9 @@ async def get_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_table, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -648,6 +865,9 @@ async def get_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -667,10 +887,39 @@ async def update_table( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates a specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateTableRequest( + ) + + # Make the request + operation = client.update_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]]): The request object. The request for @@ -687,13 +936,14 @@ async def update_table( specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request - message. The wildcard (*) path is currently not + message. The wildcard (\*) path is currently not supported. Currently UpdateTable is only supported for the following fields: - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. @@ -701,11 +951,13 @@ async def update_table( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -717,16 +969,22 @@ async def update_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.UpdateTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateTableRequest): + request = bigtable_table_admin.UpdateTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -737,11 +995,9 @@ async def update_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -751,6 +1007,9 @@ async def update_table( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -777,11 +1036,34 @@ async def delete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified table and all of its data. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteTableRequest( + name="name_value", + ) + + # Make the request + await client.delete_table(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]]): The request object. Request message for @@ -794,23 +1076,31 @@ async def delete_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.DeleteTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteTableRequest): + request = bigtable_table_admin.DeleteTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -819,11 +1109,9 @@ async def delete_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_table, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -831,6 +1119,9 @@ async def delete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -848,11 +1139,41 @@ async def undelete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Restores a specified table which was accidentally deleted. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_undelete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UndeleteTableRequest( + name="name_value", + ) + + # Make the request + operation = client.undelete_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]]): The request object. Request message for @@ -865,11 +1186,13 @@ async def undelete_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -881,16 +1204,22 @@ async def undelete_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.UndeleteTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): + request = bigtable_table_admin.UndeleteTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -899,11 +1228,9 @@ async def undelete_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undelete_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.undelete_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -911,6 +1238,9 @@ async def undelete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -930,99 +1260,141 @@ async def undelete_table( # Done; return the response. 
return response - async def modify_column_families( + async def create_authorized_view( self, request: Optional[ - Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] + Union[bigtable_table_admin.CreateAuthorizedViewRequest, dict] ] = None, *, - name: Optional[str] = None, - modifications: Optional[ - MutableSequence[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification - ] - ] = None, + parent: Optional[str] = None, + authorized_view: Optional[table.AuthorizedView] = None, + authorized_view_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Table: - r"""Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new AuthorizedView in a table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + # Make the request + operation = client.create_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - name (:class:`str`): - Required. The unique name of the table whose families - should be modified. Values are of the form + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]]): + The request object. The request for + [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] + parent (:class:`str`): + Required. This is the name of the table the + AuthorizedView belongs to. Values are of the form ``projects/{project}/instances/{instance}/tables/{table}``. - This corresponds to the ``name`` field + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - modifications (:class:`MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): - Required. Modifications to be - atomically applied to the specified - table's families. Entries are applied in - order, meaning that earlier - modifications can be masked by later - ones (in the case of repeated updates to - the same family, for example). 
+ authorized_view (:class:`google.cloud.bigtable_admin_v2.types.AuthorizedView`):
+ Required. The AuthorizedView to
+ create.

- This corresponds to the ``modifications`` field
+ This corresponds to the ``authorized_view`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ authorized_view_id (:class:`str`):
+ Required. The id of the AuthorizedView to create. This
+ AuthorizedView must not already exist. The
+ ``authorized_view_id`` appended to ``parent`` forms the
+ full AuthorizedView name of the form
+ ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``.
+
+ This corresponds to the ``authorized_view_id`` field
 on the ``request`` instance; if ``request`` is provided, this
 should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
 should be retried.
 timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.

 Returns:
- google.cloud.bigtable_admin_v2.types.Table:
- A collection of user data indexed by
- row, column, and timestamp. Each table
- is served using the resources of its
- parent cluster.
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users
+ can configure access to each Authorized View
+ independently from the table and use the existing
+ Data APIs to access the subset of data.

 """
 # Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name, modifications])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [parent, authorized_view, authorized_view_id]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
 if request is not None and has_flattened_params:
 raise ValueError(
 "If the `request` argument is set, then none of "
 "the individual field arguments should be set."
 )

- request = bigtable_table_admin.ModifyColumnFamiliesRequest(request)
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.CreateAuthorizedViewRequest):
+ request = bigtable_table_admin.CreateAuthorizedViewRequest(request)

 # If we have keyword arguments corresponding to fields on the
 # request, apply these. 
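+ # Illustrative sketch (editorial addition, not generated code): the
+ # mutual-exclusion check above means callers pass either a request
+ # object or flattened fields, never both. Hypothetical calls:
+ #
+ #   await client.create_authorized_view(
+ #       parent="projects/p/instances/i/tables/t",
+ #       authorized_view=bigtable_admin_v2.AuthorizedView(),
+ #       authorized_view_id="my-view",
+ #   )  # OK: flattened fields only
+ #
+ #   await client.create_authorized_view(
+ #       request=bigtable_admin_v2.CreateAuthorizedViewRequest(),
+ #       parent="projects/p/instances/i/tables/t",
+ #   )  # raises ValueError: request plus a flattened field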
- if name is not None: - request.name = name - if modifications: - request.modifications.extend(modifications) + if parent is not None: + request.parent = parent + if authorized_view is not None: + request.authorized_view = authorized_view + if authorized_view_id is not None: + request.authorized_view_id = authorized_view_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.modify_column_families, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_authorized_view + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1031,109 +1403,232 @@ async def modify_column_families( metadata=metadata, ) + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.AuthorizedView, + metadata_type=bigtable_table_admin.CreateAuthorizedViewMetadata, + ) + # Done; return the response. return response - async def drop_row_range( + async def list_authorized_views( self, - request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.ListAuthorizedViewsRequest, dict] + ] = None, *, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListAuthorizedViewsAsyncPager: + r"""Lists all AuthorizedViews from a specific table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_authorized_views(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAuthorizedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_authorized_views(request=request) + + # Handle the response + async for response in page_result: + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]]): The request object. 
Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + parent (:class:`str`): + Required. The unique name of the table for which + AuthorizedViews should be listed. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + Iterating over this object will yield results and + resolve additional pages automatically. + """ # Create or coerce a protobuf request object. - request = bigtable_table_admin.DropRowRangeRequest(request) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListAuthorizedViewsRequest): + request = bigtable_table_admin.ListAuthorizedViewsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.drop_row_range, - default_timeout=3600.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_authorized_views + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. - await rpc( + response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - async def generate_consistency_token( + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
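+ # Illustrative usage (editorial addition, not generated code): awaiting
+ # the client method yields the pager, and iterating it resolves further
+ # pages lazily. `client` and the parent path below are hypothetical:
+ #
+ #   pager = await client.list_authorized_views(parent="projects/p/instances/i/tables/t")
+ #   async for view in pager:  # each item is a table.AuthorizedView
+ #       print(view.name)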
+ response = pagers.ListAuthorizedViewsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_authorized_view( self, request: Optional[ - Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict] + Union[bigtable_table_admin.GetAuthorizedViewRequest, dict] ] = None, *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.AuthorizedView: + r"""Gets information from a specified AuthorizedView. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_authorized_view(request=request) + + # Handle the response + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]]): The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] name (:class:`str`): - Required. The unique name of the Table for which to - create a consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. + Required. The unique name of the requested + AuthorizedView. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: - google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + google.cloud.bigtable_admin_v2.types.AuthorizedView: + AuthorizedViews represent subsets of + a particular Cloud Bigtable table. Users + can configure access to each Authorized + View independently from the table and + use the existing Data APIs to access the + subset of data. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetAuthorizedViewRequest): + request = bigtable_table_admin.GetAuthorizedViewRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1142,21 +1637,9 @@ async def generate_consistency_token( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.generate_consistency_token, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_authorized_view + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1164,6 +1647,9 @@ async def generate_consistency_token( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1175,97 +1661,135 @@ async def generate_consistency_token( # Done; return the response. return response - async def check_consistency( + async def update_authorized_view( self, request: Optional[ - Union[bigtable_table_admin.CheckConsistencyRequest, dict] + Union[bigtable_table_admin.UpdateAuthorizedViewRequest, dict] ] = None, *, - name: Optional[str] = None, - consistency_token: Optional[str] = None, + authorized_view: Optional[table.AuthorizedView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.CheckConsistencyResponse: - r"""Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. 
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates an AuthorizedView in a table.

- Args:
- request (Optional[Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]]):
- The request object. Request message for
- [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency]
- name (:class:`str`):
- Required. The unique name of the Table for which to
- check replication consistency. Values are of the form
+ .. code-block:: python

- This corresponds to the ``name`` field
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_update_authorized_view():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.UpdateAuthorizedViewRequest(
+ )
+
+ # Make the request
+ operation = client.update_authorized_view(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]]):
+ The request object. The request for
+ [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView].
+ authorized_view (:class:`google.cloud.bigtable_admin_v2.types.AuthorizedView`):
+ Required. The AuthorizedView to update. The ``name`` in
+ ``authorized_view`` is used to identify the
+ AuthorizedView. AuthorizedView name must be in this format:
+ ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``.
+
+ This corresponds to the ``authorized_view`` field
 on the ``request`` instance; if ``request`` is provided, this
 should not be set.
- consistency_token (:class:`str`):
- Required. The token created using
- GenerateConsistencyToken for the Table.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Optional. The list of fields to update. A mask
+ specifying which fields in the AuthorizedView resource
+ should be updated. This mask is relative to the
+ AuthorizedView resource, not to the request message. A
+ field will be overwritten if it is in the mask. If
+ empty, all fields set in the request will be
+ overwritten. A special value ``*`` means to overwrite
+ all fields (including fields not set in the request).

- This corresponds to the ``consistency_token`` field
+ This corresponds to the ``update_mask`` field
 on the ``request`` instance; if ``request`` is provided, this
 should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
 should be retried.
 timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users + can configure access to each Authorized View + independently from the table and use the existing + Data APIs to access the subset of data. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, consistency_token]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [authorized_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.CheckConsistencyRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateAuthorizedViewRequest): + request = bigtable_table_admin.UpdateAuthorizedViewRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: - request.name = name - if consistency_token is not None: - request.consistency_token = consistency_token + if authorized_view is not None: + request.authorized_view = authorized_view + if update_mask is not None: + request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_consistency, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_authorized_view + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata( + (("authorized_view.name", request.authorized_view.name),) + ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1274,128 +1798,101 @@ async def check_consistency( metadata=metadata, ) + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.AuthorizedView, + metadata_type=bigtable_table_admin.UpdateAuthorizedViewMetadata, + ) + # Done; return the response. 
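+ # Editorial note: operation_async.from_gapic above wraps the raw
+ # long-running Operation so that the operation's result is deserialized
+ # into a typed table.AuthorizedView, and operation.metadata into
+ # UpdateAuthorizedViewMetadata, instead of packed Any protos.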
return response - async def snapshot_table( + async def delete_authorized_view( self, request: Optional[ - Union[bigtable_table_admin.SnapshotTableRequest, dict] + Union[bigtable_table_admin.DeleteAuthorizedViewRequest, dict] ] = None, *, name: Optional[str] = None, - cluster: Optional[str] = None, - snapshot_id: Optional[str] = None, - description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Permanently deletes a specified AuthorizedView. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_authorized_view(request=request) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]]): The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] name (:class:`str`): - Required. The unique name of the table to have the - snapshot taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. + Required. The unique name of the AuthorizedView to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`str`): - Required. The name of the cluster where the snapshot - will be created in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- snapshot_id (:class:`str`): - Required. The ID by which the new snapshot should be - referred to within the parent cluster, e.g., - ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - - This corresponds to the ``snapshot_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - description (:class:`str`): - Description of the snapshot. - This corresponds to the ``description`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a - checkpoint for data restoration or a data source for - a new table. - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, cluster, snapshot_id, description]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.SnapshotTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteAuthorizedViewRequest): + request = bigtable_table_admin.DeleteAuthorizedViewRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name - if cluster is not None: - request.cluster = cluster - if snapshot_id is not None: - request.snapshot_id = snapshot_id - if description is not None: - request.description = description # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
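+ # Editorial note: this change replaces per-call
+ # gapic_v1.method_async.wrap_method(...) with a lookup into the
+ # transport's _wrapped_methods table, which is built once when the
+ # transport is constructed, so default retry/timeout policies are no
+ # longer re-created on every invocation. Hypothetical sketch:
+ #
+ #   transport = self._client._transport
+ #   rpc = transport._wrapped_methods[transport.delete_authorized_view]
+ #   await rpc(request, timeout=10.0)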
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.snapshot_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_authorized_view + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1403,119 +1900,135 @@ async def snapshot_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. - response = await rpc( + await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Snapshot, - metadata_type=bigtable_table_admin.SnapshotTableMetadata, - ) - - # Done; return the response. - return response - - async def get_snapshot( + async def modify_column_families( self, - request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] + ] = None, *, name: Optional[str] = None, + modifications: Optional[ + MutableSequence[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + ] + ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Snapshot: - r"""Gets metadata information about the specified - snapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Table: + r"""Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_modify_column_families(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ModifyColumnFamiliesRequest( + name="name_value", + ) + + # Make the request + response = await client.modify_column_families(request=request) + + # Handle the response + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]]): The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. 
This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] name (:class:`str`): - Required. The unique name of the requested snapshot. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + Required. The unique name of the table whose families + should be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + modifications (:class:`MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): + Required. Modifications to be + atomically applied to the specified + table's families. Entries are applied in + order, meaning that earlier + modifications can be masked by later + ones (in the case of repeated updates to + the same family, for example). + + This corresponds to the ``modifications`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.types.Snapshot: - A snapshot of a table at a particular - time. A snapshot can be used as a - checkpoint for data restoration or a - data source for a new table. - - Note: This is a private alpha release of - Cloud Bigtable snapshots. This feature - is not currently available to most Cloud - Bigtable customers. This feature might - be changed in backward-incompatible ways - and is not recommended for production - use. It is not subject to any SLA or - deprecation policy. + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, modifications] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.GetSnapshotRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
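+ # Illustrative sketch (editorial addition, not generated code): the
+ # isinstance guard below converts a dict while passing an existing
+ # proto through unchanged. Hypothetical inputs:
+ #
+ #   request = {"name": "projects/p/instances/i/tables/t"}  # converted
+ #   request = bigtable_table_admin.ModifyColumnFamiliesRequest(
+ #       name="projects/p/instances/i/tables/t",
+ #   )  # already a proto; used as-is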
+ if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name + if modifications: + request.modifications.extend(modifications) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_snapshot, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.modify_column_families + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1523,6 +2036,9 @@ async def get_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1534,111 +2050,193 @@ async def get_snapshot( # Done; return the response. return response - async def list_snapshots( + async def drop_row_range( self, - request: Optional[ - Union[bigtable_table_admin.ListSnapshotsRequest, dict] - ] = None, + request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, *, - parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSnapshotsAsyncPager: - r"""Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_drop_row_range(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DropRowRangeRequest( + row_key_prefix=b'row_key_prefix_blob', + name="name_value", + ) + + # Make the request + await client.drop_row_range(request=request) + Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): The request object. 
Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): + request = bigtable_table_admin.DropRowRangeRequest(request) - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (:class:`str`): - Required. The unique name of the cluster for which - snapshots should be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list snapshots for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.drop_row_range + ] - This corresponds to the ``parent`` field + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def generate_consistency_token( + self, + request: Optional[ + Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_generate_consistency_token(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GenerateConsistencyTokenRequest( + name="name_value", + ) + + # Make the request + response = await client.generate_consistency_token(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + name (:class:`str`): + Required. The unique name of the Table for which to + create a consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: + google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - Iterating over this object will yield results and - resolve additional pages automatically. + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.ListSnapshotsRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
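+ # Illustrative workflow (editorial addition, not generated code): the
+ # token produced here feeds check_consistency. `client` and
+ # `table_name` are hypothetical:
+ #
+ #   token_resp = await client.generate_consistency_token(name=table_name)
+ #   check_resp = await client.check_consistency(
+ #       name=table_name,
+ #       consistency_token=token_resp.consistency_token,
+ #   )
+ #   if check_resp.consistent:
+ #       print("replication has caught up")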
+ if not isinstance( + request, bigtable_table_admin.GenerateConsistencyTokenRequest + ): + request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: - request.parent = parent + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_snapshots, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.generate_consistency_token + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1647,30 +2245,154 @@ async def list_snapshots( metadata=metadata, ) - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListSnapshotsAsyncPager( - method=rpc, - request=request, - response=response, + # Done; return the response. + return response + + async def check_consistency( + self, + request: Optional[ + Union[bigtable_table_admin.CheckConsistencyRequest, dict] + ] = None, + *, + name: Optional[str] = None, + consistency_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_check_consistency(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Make the request + response = await client.check_consistency(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + name (:class:`str`): + Required. The unique name of the Table for which to + check replication consistency. 
Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + consistency_token (:class:`str`): + Required. The token created using + GenerateConsistencyToken for the Table. + + This corresponds to the ``consistency_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, consistency_token] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): + request = bigtable_table_admin.CheckConsistencyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if consistency_token is not None: + request.consistency_token = consistency_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.check_consistency + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, metadata=metadata, ) # Done; return the response. return response - async def delete_snapshot( + async def snapshot_table( self, request: Optional[ - Union[bigtable_table_admin.DeleteSnapshotRequest, dict] + Union[bigtable_table_admin.SnapshotTableRequest, dict] ] = None, *, name: Optional[str] = None, + cluster: Optional[str] = None, + snapshot_id: Optional[str] = None, + description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently deletes the specified snapshot. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new snapshot in the specified cluster from + the specified source table. 
The cluster and the table
+ must be in the same instance.

 Note: This is a private alpha release of Cloud Bigtable
 snapshots. This feature is not currently available to
@@ -1679,10 +2401,42 @@ async def delete_snapshot(
 recommended for production use. It is not subject to any
 SLA or deprecation policy.

+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ async def sample_snapshot_table():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.SnapshotTableRequest(
+ name="name_value",
+ cluster="cluster_value",
+ snapshot_id="snapshot_id_value",
+ )
+
+ # Make the request
+ operation = client.snapshot_table(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = (await operation).result()
+
+ # Handle the response
+ print(response)
+
 Args:
- request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]):
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]):
 The request object. Request message for
- [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot]
+ [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable]

 Note: This is a private alpha release of Cloud Bigtable
 snapshots. This feature is not currently available to
@@ -1691,114 +2445,1380 @@ async def delete_snapshot(
 recommended for production use. It is not subject to any
 SLA or deprecation policy.
 name (:class:`str`):
- Required. The unique name of the snapshot to be deleted.
- Values are of the form
- ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``.
+ Required. The unique name of the table to have the
+ snapshot taken. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster (:class:`str`):
+ Required. The name of the cluster in which the snapshot
+ will be created. Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``cluster`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ snapshot_id (:class:`str`):
+ Required. The ID by which the new snapshot should be
+ referred to within the parent cluster, e.g.,
+ ``mysnapshot`` of the form:
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``.
+
+ This corresponds to the ``snapshot_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ description (:class:`str`):
+ Description of the snapshot.
+ This corresponds to the ``description`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
+ should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a + checkpoint for data restoration or a data source for + a new table. + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, cluster, snapshot_id, description] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): + request = bigtable_table_admin.SnapshotTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if cluster is not None: + request.cluster = cluster + if snapshot_id is not None: + request.snapshot_id = snapshot_id + if description is not None: + request.description = description + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.snapshot_table + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Snapshot, + metadata_type=bigtable_table_admin.SnapshotTableMetadata, + ) + + # Done; return the response. + return response + + async def get_snapshot( + self, + request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Snapshot: + r"""Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. 
This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSnapshotRequest( + name="name_value", + ) + + # Make the request + response = await client.get_snapshot(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (:class:`str`): + Required. The unique name of the requested snapshot. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
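+        # (A dict passed as ``request`` is coerced by the proto-plus constructor into a GetSnapshotRequest; an existing request object is used unchanged.)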
+ if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): + request = bigtable_table_admin.GetSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_snapshot + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_snapshots( + self, + request: Optional[ + Union[bigtable_table_admin.ListSnapshotsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListSnapshotsAsyncPager: + r"""Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_snapshots(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSnapshotsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_snapshots(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (:class:`str`): + Required. The unique name of the cluster for which + snapshots should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): + request = bigtable_table_admin.ListSnapshotsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_snapshots + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSnapshotsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_snapshot( + self, + request: Optional[ + Union[bigtable_table_admin.DeleteSnapshotRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. 
This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSnapshotRequest( + name="name_value", + ) + + # Make the request + await client.delete_snapshot(request=request) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (:class:`str`): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): + request = bigtable_table_admin.DeleteSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_snapshot + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
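+        # (``routing_header.to_grpc_metadata`` encodes these pairs into the ``x-goog-request-params`` metadata entry, which the service uses to route the request by resource name.)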
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_backup( + self, + request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, + *, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + backup: Optional[table.Backup] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + backup=backup, + ) + + # Make the request + operation = client.create_backup(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]]): + The request object. The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + parent (:class:`str`): + Required. This must be one of the clusters in the + instance in which this table is located. The backup will + be stored in this cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (:class:`str`): + Required. The id of the backup to be created. The + ``backup_id`` along with the parent ``parent`` are + combined as {parent}/backups/{backup_id} to create the + full backup name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]\*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): + Required.
The backup to create. + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, backup_id, backup] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateBackupRequest): + request = bigtable_table_admin.CreateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if backup is not None: + request.backup = backup + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_backup + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + async def get_backup( + self, + request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Backup: + r"""Gets metadata on a pending or completed Cloud + Bigtable Backup. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetBackupRequest( + name="name_value", + ) + + # Make the request + response = await client.get_backup(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]]): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + name (:class:`str`): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetBackupRequest): + request = bigtable_table_admin.GetBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_backup + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
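+        # (Unlike create_backup, get_backup is not a long-running operation; the Backup message is returned directly.)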
+ return response + + async def update_backup( + self, + request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, + *, + backup: Optional[table.Backup] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Backup: + r"""Updates a pending or completed Cloud Bigtable Backup. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.UpdateBackupRequest( + backup=backup, + ) + + # Make the request + response = await client.update_backup(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]]): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only + supported for the following fields: + + - ``backup.expire_time``. + + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be + updated. This mask is relative to the Backup resource, + not to the request message. The field mask must always + be specified; this prevents any future fields from being + erased accidentally by clients that do not know about + them. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
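+        # (Flattened keyword arguments such as ``backup`` and ``update_mask`` are a convenience layer; they are mutually exclusive with passing a prebuilt ``request``.)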
+ flattened_params = [backup, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): + request = bigtable_table_admin.UpdateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_backup + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("backup.name", request.backup.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_backup( + self, + request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a pending or completed Cloud Bigtable backup. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteBackupRequest( + name="name_value", + ) + + # Make the request + await client.delete_backup(request=request) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]]): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + name (:class:`str`): + Required. Name of the backup to delete. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): + request = bigtable_table_admin.DeleteBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_backup + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_backups( + self, + request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListBackupsAsyncPager: + r"""Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_backups(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListBackupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_backups(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]]): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + parent (:class:`str`): + Required. The cluster to list backups from. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListBackupsRequest): + request = bigtable_table_admin.ListBackupsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_backups + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBackupsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def _restore_table( + self, + request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Create a new table by restoring from a completed backup. The + returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + async def sample_restore_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.RestoreTableRequest( + backup="backup_value", + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + operation = client._restore_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]): + The request object. The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. - request = bigtable_table_admin.DeleteSnapshotRequest(request) + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.RestoreTableRequest): + request = bigtable_table_admin.RestoreTableRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
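+        # (The wrappers in ``_wrapped_methods`` are built once when the transport is constructed, carrying each method's default retry and timeout configuration, replacing the older per-call ``wrap_method`` pattern.)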
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_snapshot, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.restore_table + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. - await rpc( + response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - async def create_backup( + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, + ) + + # Done; return the response. + return response + + async def copy_backup( self, - request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, + request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, *, parent: Optional[str] = None, backup_id: Optional[str] = None, - backup: Optional[table.Backup] = None, + source_backup: Optional[str] = None, + expire_time: Optional[timestamp_pb2.Timestamp] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: - r"""Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. + r"""Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_copy_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + # Make the request + operation = client.copy_backup(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]): The request object. 
The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. parent (:class:`str`): - Required. This must be one of the clusters in the - instance in which this table is located. The backup will - be stored in this cluster. Values are of the form + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already exist. + Values are of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. backup_id (:class:`str`): - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are - combined as {parent}/backups/{backup_id} to create the - full backup name, of the form: + Required. The id of the new backup. The ``backup_id`` + along with ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup + name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. This string must be between 1 and 50 characters in - length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*. + length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]\*. This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): - Required. The backup to create. - This corresponds to the ``backup`` field + source_backup (:class:`str`): + Required. The source backup to be copied from. The + source backup needs to be in READY state for it to be + copied. Copying a copied backup is not allowed. Once + CopyBackup is in progress, the source backup cannot be + deleted or cleaned up on expiration until CopyBackup is + finished. Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``source_backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): + Required. The expiration time of the copied + backup with microsecond granularity that must be at + least 6 hours and at most 30 days from the time the + request is received. Once the ``expire_time`` has + passed, Cloud Bigtable will delete the backup and free + the resources used by the backup. + + This corresponds to the ``expire_time`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1810,16 +3830,22 @@ async def create_backup( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent, backup_id, backup]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, backup_id, source_backup, expire_time] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.CreateBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CopyBackupRequest): + request = bigtable_table_admin.CopyBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1827,16 +3853,16 @@ async def create_backup( request.parent = parent if backup_id is not None: request.backup_id = backup_id - if backup is not None: - request.backup = backup + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.copy_backup + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1844,6 +3870,9 @@ async def create_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1857,181 +3886,141 @@ async def create_backup( response, self._client._transport.operations_client, table.Backup, - metadata_type=bigtable_table_admin.CreateBackupMetadata, + metadata_type=bigtable_table_admin.CopyBackupMetadata, ) # Done; return the response. return response - async def get_backup( + async def get_iam_policy( self, - request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, *, - name: Optional[str] = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Gets metadata on a pending or completed Cloud - Bigtable Backup. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Gets the access control policy for a Bigtable + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.get_iam_policy(request=request) + + # Handle the response + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]]): - The request object. The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - name (:class:`str`): - Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): + The request object. Request message for ``GetIamPolicy`` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. - This corresponds to the ``name`` field + This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - request = bigtable_table_admin.GetBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_backup, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. - # Done; return the response. - return response + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). - async def update_backup( - self, - request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, - *, - backup: Optional[table.Backup] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Updates a pending or completed Cloud Bigtable Backup. + **JSON example:** - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]]): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only - supported for the following fields: + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - ``backup.expire_time``. + **YAML example:** - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be - updated. This mask is relative to the Backup resource, - not to the request message. The field mask must always - be specified; this prevents any future fields from being - erased accidentally by clients that do not know about - them. 
+ :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + For a description of IAM and its features, see the + [IAM + documentation](https://cloud.google.com/iam/docs/). - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([backup, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.UpdateBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if backup is not None: - request.backup = backup - if update_mask is not None: - request.update_mask = update_mask + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + elif not request: + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_iam_policy + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("backup.name", request.backup.name),) - ), + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2043,157 +4032,256 @@ async def update_backup( # Done; return the response. 
return response - async def delete_backup( + async def set_iam_policy( self, - request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, *, - name: Optional[str] = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a pending or completed Cloud Bigtable backup. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Sets the access control policy on a Bigtable + resource. Replaces any existing policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]]): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - name (:class:`str`): - Required. Name of the backup to delete. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): + The request object. Request message for ``SetIamPolicy`` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. - This corresponds to the ``name`` field + This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). 
A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](https://cloud.google.com/iam/docs/). + """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.DeleteBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + elif not request: + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.set_iam_policy + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. 
+ self._client._validate_universe_domain() + # Send the request. - await rpc( + response = await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - async def list_backups( + # Done; return the response. + return response + + async def test_iam_permissions( self, - request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, + request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, *, - parent: Optional[str] = None, + resource: Optional[str] = None, + permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBackupsAsyncPager: - r"""Lists Cloud Bigtable backups. Returns both completed - and pending backups. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified Bigtable resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]]): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - parent (:class:`str`): - Required. The cluster to list backups from. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list backups for all clusters - in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. + request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): + The request object. Request message for ``TestIamPermissions`` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. - This corresponds to the ``parent`` field + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`MutableSequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + + This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Iterating over this object will yield results and - resolve additional pages automatically. - + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [resource, permissions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.ListBackupsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + elif not request: + request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_backups, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.test_iam_permissions + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2202,64 +4290,136 @@ async def list_backups( metadata=metadata, ) - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListBackupsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - # Done; return the response. 
return response - async def restore_table( + async def create_schema_bundle( self, - request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.CreateSchemaBundleRequest, dict] + ] = None, *, + parent: Optional[str] = None, + schema_bundle_id: Optional[str] = None, + schema_bundle: Optional[table.SchemaBundle] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: - r"""Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. + r"""Creates a new schema bundle in the specified table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.create_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]]): The request object. The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + parent (:class:`str`): + Required. The parent resource where this schema bundle + will be created. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + schema_bundle_id (:class:`str`): + Required. The unique ID to use for + the schema bundle, which will become the + final component of the schema bundle's + resource name. + + This corresponds to the ``schema_bundle_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + schema_bundle (:class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`): + Required. The schema bundle to + create. 
+ + This corresponds to the ``schema_bundle`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` + A named collection of related schemas. """ # Create or coerce a protobuf request object. - request = bigtable_table_admin.RestoreTableRequest(request) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, schema_bundle_id, schema_bundle] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateSchemaBundleRequest): + request = bigtable_table_admin.CreateSchemaBundleRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if schema_bundle_id is not None: + request.schema_bundle_id = schema_bundle_id + if schema_bundle is not None: + request.schema_bundle = schema_bundle # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.restore_table, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_schema_bundle + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2267,6 +4427,9 @@ async def restore_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2279,129 +4442,140 @@ async def restore_table( response = operation_async.from_gapic( response, self._client._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.RestoreTableMetadata, + table.SchemaBundle, + metadata_type=bigtable_table_admin.CreateSchemaBundleMetadata, ) # Done; return the response. 
return response - async def copy_backup( + async def update_schema_bundle( self, - request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.UpdateSchemaBundleRequest, dict] + ] = None, *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - source_backup: Optional[str] = None, - expire_time: Optional[timestamp_pb2.Timestamp] = None, + schema_bundle: Optional[table.SchemaBundle] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: - r"""Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. + r"""Updates a schema bundle in the specified table. - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]): - The request object. The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - parent (:class:`str`): - Required. The name of the destination cluster that will - contain the backup copy. The cluster must already - exists. Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.UpdateSchemaBundleRequest( + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.update_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (:class:`str`): - Required. The id of the new backup. The ``backup_id`` - along with ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup - name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]]): + The request object. The request for + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + schema_bundle (:class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`): + Required. The schema bundle to update. - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_backup (:class:`str`): - Required. 
The source backup to be copied from. The - source backup needs to be in READY state for it to be - copied. Copying a copied backup is not allowed. Once - CopyBackup is in progress, the source backup cannot be - deleted or cleaned up on expiration until CopyBackup is - finished. Values are of the form: - ``projects//instances//clusters//backups/``. + The schema bundle's ``name`` field is used to identify + the schema bundle to update. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - This corresponds to the ``source_backup`` field + This corresponds to the ``schema_bundle`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): - Required. Required. The expiration time of the copied - backup with microsecond granularity that must be at - least 6 hours and at most 30 days from the time the - request is received. Once the ``expire_time`` has - passed, Cloud Bigtable will delete the backup and free - the resources used by the backup. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. The list of fields to + update. - This corresponds to the ``expire_time`` field + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. + :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` + A named collection of related schemas. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [schema_bundle, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable_table_admin.CopyBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateSchemaBundleRequest): + request = bigtable_table_admin.UpdateSchemaBundleRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
- if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if source_backup is not None: - request.source_backup = source_backup - if expire_time is not None: - request.expire_time = expire_time + if schema_bundle is not None: + request.schema_bundle = schema_bundle + if update_mask is not None: + request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.copy_backup, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_schema_bundle + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + gapic_v1.routing_header.to_grpc_metadata( + (("schema_bundle.name", request.schema_bundle.name),) + ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2414,121 +4588,117 @@ async def copy_backup( response = operation_async.from_gapic( response, self._client._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CopyBackupMetadata, + table.SchemaBundle, + metadata_type=bigtable_table_admin.UpdateSchemaBundleMetadata, ) # Done; return the response. return response - async def get_iam_policy( + async def get_schema_bundle( self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.GetSchemaBundleRequest, dict] + ] = None, *, - resource: Optional[str] = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for a Table or Backup - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for ``GetIamPolicy`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.SchemaBundle: + r"""Gets metadata information about the specified schema + bundle. - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + .. code-block:: python - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. + async def sample_get_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + # Initialize request argument(s) + request = bigtable_admin_v2.GetSchemaBundleRequest( + name="name_value", + ) - **JSON example:** + # Make the request + response = await client.get_schema_bundle(request=request) - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + # Handle the response + print(response) - **YAML example:** + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]]): + The request object. The request for + [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. + name (:class:`str`): + Required. The unique name of the schema bundle to + retrieve. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). 
+ Returns: + google.cloud.bigtable_admin_v2.types.SchemaBundle: + A named collection of related + schemas. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, - ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetSchemaBundleRequest): + request = bigtable_table_admin.GetSchemaBundleRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_iam_policy, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_schema_bundle + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2540,103 +4710,114 @@ async def get_iam_policy( # Done; return the response. return response - async def set_iam_policy( + async def list_schema_bundles( self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.ListSchemaBundlesRequest, dict] + ] = None, *, - resource: Optional[str] = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on a Table or Backup - resource. Replaces any existing policy. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListSchemaBundlesAsyncPager: + r"""Lists all schema bundles associated with the + specified table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_schema_bundles(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSchemaBundlesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_schema_bundles(request=request) + + # Handle the response + async for response in page_result: + print(response) Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for ``SetIamPolicy`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]]): + The request object. The request for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + parent (:class:`str`): + Required. The parent, which owns this collection of + schema bundles. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. - This corresponds to the ``resource`` field + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
- - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesAsyncPager: + The response for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). + Iterating over this object will yield results and + resolve additional pages automatically. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListSchemaBundlesRequest): + request = bigtable_table_admin.ListSchemaBundlesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_iam_policy, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_schema_bundles + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -2645,109 +4826,123 @@ async def set_iam_policy( metadata=metadata, ) + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSchemaBundlesAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + # Done; return the response. return response - async def test_iam_permissions( + async def delete_schema_bundle( self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.DeleteSchemaBundleRequest, dict] + ] = None, *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified Table or Backup resource. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a schema bundle in the specified table. - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for ``TestIamPermissions`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the - appropriate value for this field. + .. code-block:: python - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - permissions (:class:`MutableSequence[str]`): - The set of permissions to check for the ``resource``. - Permissions with wildcards (such as '*' or 'storage.*') - are not allowed. For more information see `IAM - Overview `__. + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 - This corresponds to the ``permissions`` field + async def sample_delete_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSchemaBundleRequest( + name="name_value", + ) + + # Make the request + await client.delete_schema_bundle(request=request) + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]]): + The request object. The request for + [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. + name (:class:`str`): + Required. The unique name of the schema bundle to + delete. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: - Response message for TestIamPermissions method. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.TestIamPermissionsRequest(**request) - elif not request: - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, - ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteSchemaBundleRequest): + request = bigtable_table_admin.DeleteSchemaBundleRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.test_iam_permissions, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_schema_bundle + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. - response = await rpc( + await rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - # Done; return the response. 
- return response - - async def __aenter__(self) -> "BigtableTableAdminAsyncClient": + async def __aenter__(self) -> "BaseBigtableTableAdminAsyncClient": return self async def __aexit__(self, exc_type, exc, tb): @@ -2758,5 +4953,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + -__all__ = ("BigtableTableAdminAsyncClient",) +__all__ = ("BaseBigtableTableAdminAsyncClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index d0c04ed11..ce251db7d 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,10 +14,14 @@ # limitations under the License. # from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging import os import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -28,6 +32,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_admin_v2 import gapic_version as package_version @@ -40,11 +45,21 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -52,6 +67,7 @@ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.cloud.bigtable_admin_v2.types import types from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -62,7 +78,7 @@ from .transports.rest import BigtableTableAdminRestTransport -class BigtableTableAdminClientMeta(type): +class BaseBigtableTableAdminClientMeta(type): """Metaclass for the BigtableTableAdmin client. This provides class-level methods for building and retrieving @@ -99,7 +115,7 @@ def get_transport_class( return next(iter(cls._transport_registry.values())) -class BigtableTableAdminClient(metaclass=BigtableTableAdminClientMeta): +class BaseBigtableTableAdminClient(metaclass=BaseBigtableTableAdminClientMeta): """Service for creating, configuring, and deleting Cloud Bigtable tables. 
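The `hasattr` guard on `DEFAULT_CLIENT_INFO` above is defensive feature detection: `protobuf_runtime_version` only exists on `ClientInfo` in newer google-api-core releases, so the version is stamped only when the field is available. A small sketch of the same pattern in isolation:

```python
# Stamp the protobuf runtime version only when the installed google-api-core
# ClientInfo supports the field, so older api_core releases keep working.
import google.protobuf
from google.api_core import gapic_v1

client_info = gapic_v1.client_info.ClientInfo(gapic_version="0.0.0")
if hasattr(client_info, "protobuf_runtime_version"):
    client_info.protobuf_runtime_version = google.protobuf.__version__

print(getattr(client_info, "protobuf_runtime_version", "field not supported"))
```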
@@ -137,11 +153,43 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -153,7 +201,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - BigtableTableAdminClient: The constructed client. + BaseBigtableTableAdminClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials @@ -171,7 +219,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - BigtableTableAdminClient: The constructed client. + BaseBigtableTableAdminClient: The constructed client. 
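`_use_client_cert_effective` above prefers `mtls.should_use_client_cert()` when the installed google-auth provides it, and otherwise falls back to the environment variable. A standalone, stdlib-only sketch of the fallback branch:

```python
import os


def use_client_cert_from_env() -> bool:
    """Mirror of the env-var fallback: only "true"/"false" are accepted."""
    value = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false").lower()
    if value not in ("true", "false"):
        raise ValueError(
            "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
            " either `true` or `false`"
        )
    return value == "true"


os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"
print(use_client_cert_from_env())  # True
os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "maybe"
# use_client_cert_from_env() would now raise ValueError
```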
""" credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -189,6 +237,30 @@ def transport(self) -> BigtableTableAdminTransport: """ return self._transport + @staticmethod + def authorized_view_path( + project: str, + instance: str, + table: str, + authorized_view: str, + ) -> str: + """Returns a fully-qualified authorized_view string.""" + return "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( + project=project, + instance=instance, + table=table, + authorized_view=authorized_view, + ) + + @staticmethod + def parse_authorized_view_path(path: str) -> Dict[str, str]: + """Parses a authorized_view path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P.+?)/authorizedViews/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def backup_path( project: str, @@ -278,6 +350,30 @@ def parse_instance_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) return m.groupdict() if m else {} + @staticmethod + def schema_bundle_path( + project: str, + instance: str, + table: str, + schema_bundle: str, + ) -> str: + """Returns a fully-qualified schema_bundle string.""" + return "projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}".format( + project=project, + instance=instance, + table=table, + schema_bundle=schema_bundle, + ) + + @staticmethod + def parse_schema_bundle_path(path: str) -> Dict[str, str]: + """Parses a schema_bundle path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)/schemaBundles/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def snapshot_path( project: str, @@ -405,7 +501,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -435,14 +531,15 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = BaseBigtableTableAdminClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -450,7 +547,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -468,15 +565,182 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = BaseBigtableTableAdminClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert, use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. 
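The resource path helpers added above (`authorized_view_path`, `schema_bundle_path`, and their `parse_*` counterparts) are plain string/regex utilities, so they can be exercised without credentials. A round-trip sketch, assuming an installed google-cloud-bigtable version that ships these helpers:

```python
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

path = BigtableTableAdminClient.schema_bundle_path(
    project="my-project",
    instance="my-instance",
    table="my-table",
    schema_bundle="my-bundle",
)
print(path)
# projects/my-project/instances/my-instance/tables/my-table/schemaBundles/my-bundle

segments = BigtableTableAdminClient.parse_schema_bundle_path(path)
print(segments["schema_bundle"])  # my-bundle

# A non-matching path parses to an empty dict rather than raising.
print(BigtableTableAdminClient.parse_schema_bundle_path("not/a/path"))  # {}
```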
+ """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ( + BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True + + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableTableAdminTransport]] = None, + transport: Optional[ + Union[ + str, + BigtableTableAdminTransport, + Callable[..., BigtableTableAdminTransport], + ] + ] = None, client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable table admin client. + """Instantiates the base bigtable table admin client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -484,25 +748,37 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, BigtableTableAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTableAdminTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. @@ -513,17 +789,38 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BaseBigtableTableAdminClient._read_environment_variables() + self._client_cert_source = BaseBigtableTableAdminClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = BaseBigtableTableAdminClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() - api_key_value = getattr(client_options, "api_key", None) + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -532,20 +829,33 @@ def __init__( # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. - if isinstance(transport, BigtableTableAdminTransport): + transport_provided = isinstance(transport, BigtableTableAdminTransport) + if transport_provided: # transport is a BigtableTableAdminTransport instance. 
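The widened `transport` parameter described above accepts a callable in addition to a name or instance; the callable receives the same keyword arguments the transport constructor would. A hedged sketch (the `transport_factory` wrapper is illustrative, and constructing the client requires Application Default Credentials):

```python
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports import (
    BigtableTableAdminGrpcTransport,
)


def transport_factory(**kwargs):
    # Inspect or adjust the standard constructor kwargs (host, credentials,
    # scopes, quota_project_id, ...) before the transport is built.
    print("building transport for host:", kwargs.get("host"))
    return BigtableTableAdminGrpcTransport(**kwargs)


client = BigtableTableAdminClient(transport=transport_factory)
```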
- if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) - self._transport = transport - else: + self._transport = cast(BigtableTableAdminTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or BaseBigtableTableAdminClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -555,19 +865,50 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) - self._transport = Transport( + transport_init: Union[ + Type[BigtableTableAdminTransport], + Callable[..., BigtableTableAdminTransport], + ] = ( + BaseBigtableTableAdminClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., BigtableTableAdminTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BaseBigtableTableAdminClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "credentialsType": None, + }, + ) + def create_table( self, request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, @@ -577,12 +918,39 @@ def create_table( table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + response = client.create_table(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]): The request object. Request message for @@ -612,8 +980,10 @@ def create_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -624,19 +994,20 @@ def create_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, table]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, table_id, table] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.CreateTableRequest): request = bigtable_table_admin.CreateTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -658,6 +1029,9 @@ def create_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -680,7 +1054,7 @@ def create_table_from_snapshot( source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table @@ -693,6 +1067,38 @@ def create_table_from_snapshot( recommended for production use. It is not subject to any SLA or deprecation policy. + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_table_from_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Make the request + operation = client.create_table_from_snapshot(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): The request object. Request message for @@ -732,8 +1138,10 @@ def create_table_from_snapshot( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -745,19 +1153,20 @@ def create_table_from_snapshot( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, source_snapshot]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, table_id, source_snapshot] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateTableFromSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) # If we have keyword arguments corresponding to fields on the @@ -781,6 +1190,9 @@ def create_table_from_snapshot( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
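An aside on the reworked `has_flattened_params` computation used here and in the other methods of this file: it changes the "was this argument passed?" sentinel from falsiness to `None`, whereas under the old `any([...])` check an explicitly passed empty string looked unset. A quick sketch of the difference:

```python
def old_check(params):
    return any(params)


def new_check(params):
    return len([p for p in params if p is not None]) > 0


params = ["", None, None]  # caller explicitly passed parent=""
print(old_check(params))   # False -- empty string looked like "not set"
print(new_check(params))   # True  -- only None means "not set"
```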
response = rpc( request, @@ -807,10 +1219,37 @@ def list_tables( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListTablesPager: r"""Lists all tables served from a specified instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_tables(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListTablesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tables(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]): The request object. Request message for @@ -826,8 +1265,10 @@ def list_tables( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager: @@ -839,19 +1280,20 @@ def list_tables( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListTablesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.ListTablesRequest): request = bigtable_table_admin.ListTablesRequest(request) # If we have keyword arguments corresponding to fields on the @@ -869,6 +1311,9 @@ def list_tables( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, @@ -883,6 +1328,8 @@ def list_tables( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -896,10 +1343,36 @@ def get_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetTableRequest( + name="name_value", + ) + + # Make the request + response = client.get_table(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]): The request object. Request message for @@ -915,8 +1388,10 @@ def get_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -927,19 +1402,20 @@ def get_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.GetTableRequest): request = bigtable_table_admin.GetTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -957,6 +1433,9 @@ def get_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
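Per the `ListTablesPager` change above, the `retry` and `timeout` passed to the initial call are now forwarded to the pager, so subsequent page fetches reuse the same settings. A usage sketch (project and instance names are placeholders):

```python
from google.api_core import retry as retries
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
pager = client.list_tables(
    parent="projects/my-project/instances/my-instance",
    retry=retries.Retry(timeout=60.0),
    timeout=20.0,
)
for table in pager:  # transparently issues further ListTables RPCs as needed
    print(table.name)
```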
response = rpc( request, @@ -976,10 +1455,39 @@ def update_table( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates a specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateTableRequest( + ) + + # Make the request + operation = client.update_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]): The request object. The request for @@ -996,13 +1504,14 @@ def update_table( specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request - message. The wildcard (*) path is currently not + message. The wildcard (\*) path is currently not supported. Currently UpdateTable is only supported for the following fields: - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. @@ -1013,8 +1522,10 @@ def update_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1026,19 +1537,20 @@ def update_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table, update_mask]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.UpdateTableRequest. 
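`update_table` takes a `FieldMask` that is relative to the `table` message, and only the fields listed in the docstring above may appear in it. A minimal sketch that flips `deletion_protection` (resource names are placeholders):

```python
from google.protobuf import field_mask_pb2

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
table = bigtable_admin_v2.Table(
    name="projects/my-project/instances/my-instance/tables/my-table",
    deletion_protection=True,
)
op = client.update_table(
    table=table,
    update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
)
print(op.result().deletion_protection)  # True once the LRO completes
```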
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.UpdateTableRequest): request = bigtable_table_admin.UpdateTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1060,6 +1572,9 @@ def update_table( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1086,11 +1601,34 @@ def delete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified table and all of its data. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteTableRequest( + name="name_value", + ) + + # Make the request + client.delete_table(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]): The request object. Request message for @@ -1106,23 +1644,26 @@ def delete_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
if not isinstance(request, bigtable_table_admin.DeleteTableRequest): request = bigtable_table_admin.DeleteTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1140,6 +1681,9 @@ def delete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1157,11 +1701,41 @@ def undelete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Restores a specified table which was accidentally deleted. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_undelete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UndeleteTableRequest( + name="name_value", + ) + + # Make the request + operation = client.undelete_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]): The request object. Request message for @@ -1177,8 +1751,10 @@ def undelete_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1190,19 +1766,20 @@ def undelete_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.UndeleteTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): request = bigtable_table_admin.UndeleteTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1220,6 +1797,9 @@ def undelete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1239,99 +1819,138 @@ def undelete_table( # Done; return the response. return response - def modify_column_families( + def create_authorized_view( self, request: Optional[ - Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] + Union[bigtable_table_admin.CreateAuthorizedViewRequest, dict] ] = None, *, - name: Optional[str] = None, - modifications: Optional[ - MutableSequence[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification - ] - ] = None, + parent: Optional[str] = None, + authorized_view: Optional[table.AuthorizedView] = None, + authorized_view_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Table: - r"""Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a new AuthorizedView in a table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + # Make the request + operation = client.create_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) Args: - request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - name (str): - Required. The unique name of the table whose families - should be modified. Values are of the form + request (Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]): + The request object. The request for + [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] + parent (str): + Required. This is the name of the table the + AuthorizedView belongs to. Values are of the form ``projects/{project}/instances/{instance}/tables/{table}``. - This corresponds to the ``name`` field + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): - Required. Modifications to be - atomically applied to the specified - table's families. Entries are applied in - order, meaning that earlier - modifications can be masked by later - ones (in the case of repeated updates to - the same family, for example). + authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): + Required. The AuthorizedView to + create. - This corresponds to the ``modifications`` field + This corresponds to the ``authorized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + authorized_view_id (str): + Required. The id of the AuthorizedView to create. This + AuthorizedView must not already exist. The + ``authorized_view_id`` appended to ``parent`` forms the + full AuthorizedView name of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedView/{authorized_view}``. + + This corresponds to the ``authorized_view_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users + can configure access to each Authorized View + independently from the table and use the existing + Data APIs to access the subset of data. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, modifications]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, authorized_view, authorized_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ModifyColumnFamiliesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): - request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, bigtable_table_admin.CreateAuthorizedViewRequest): + request = bigtable_table_admin.CreateAuthorizedViewRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: - request.name = name - if modifications is not None: - request.modifications = modifications + if parent is not None: + request.parent = parent + if authorized_view is not None: + request.authorized_view = authorized_view + if authorized_view_id is not None: + request.authorized_view_id = authorized_view_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.modify_column_families] + rpc = self._transport._wrapped_methods[self._transport.create_authorized_view] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1340,83 +1959,190 @@ def modify_column_families( metadata=metadata, ) + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.AuthorizedView, + metadata_type=bigtable_table_admin.CreateAuthorizedViewMetadata, + ) + # Done; return the response. return response - def drop_row_range( + def list_authorized_views( self, - request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.ListAuthorizedViewsRequest, dict] + ] = None, *, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListAuthorizedViewsPager: + r"""Lists all AuthorizedViews from a specific table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_authorized_views(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAuthorizedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_authorized_views(request=request) + + # Handle the response + for response in page_result: + print(response) Args: - request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): + request (Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]): The request object. 
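As shown above, `create_authorized_view` wraps the raw response via `operation.from_gapic`, so callers interact with a long-running operation whose `result()` yields a typed `AuthorizedView`. A hedged usage sketch (resource names are placeholders):

```python
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
op = client.create_authorized_view(
    parent="projects/my-project/instances/my-instance/tables/my-table",
    authorized_view=bigtable_admin_v2.AuthorizedView(
        subset_view=bigtable_admin_v2.AuthorizedView.SubsetView(
            row_prefixes=[b"user#"],
        ),
    ),
    authorized_view_id="my-view",
)
print(op.metadata)             # CreateAuthorizedViewMetadata while running
view = op.result(timeout=300)  # blocks until the AuthorizedView is ready
print(view.name)
```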
Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + parent (str): + Required. The unique name of the table for which + AuthorizedViews should be listed. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + Iterating over this object will yield results and + resolve additional pages automatically. + """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DropRowRangeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): - request = bigtable_table_admin.DropRowRangeRequest(request) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListAuthorizedViewsRequest): + request = bigtable_table_admin.ListAuthorizedViewsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.drop_row_range] + rpc = self._transport._wrapped_methods[self._transport.list_authorized_views] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
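The flattened-params guard repeated throughout this diff means a caller supplies either a prebuilt request proto or individual keyword fields, never both. A minimal sketch of the two accepted call styles (resource names are placeholders):

```python
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
parent = "projects/my-project/instances/my-instance/tables/my-table"

# Style 1: flattened keyword arguments; the client builds the request proto.
pager = client.list_authorized_views(parent=parent)

# Style 2: an explicit request object.
request = bigtable_admin_v2.ListAuthorizedViewsRequest(parent=parent)
pager = client.list_authorized_views(request=request)

# Passing both `request=` and `parent=` raises ValueError before any RPC is sent.
```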
- rpc( + response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - def generate_consistency_token( + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAuthorizedViewsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_authorized_view( self, request: Optional[ - Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict] + Union[bigtable_table_admin.GetAuthorizedViewRequest, dict] ] = None, *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.AuthorizedView: + r"""Gets information from a specified AuthorizedView. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_authorized_view(request=request) + + # Handle the response + print(response) Args: - request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): + request (Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]): The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] name (str): - Required. The unique name of the Table for which to - create a consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. + Required. The unique name of the requested + AuthorizedView. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1424,33 +2150,38 @@ def generate_consistency_token( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + google.cloud.bigtable_admin_v2.types.AuthorizedView: + AuthorizedViews represent subsets of + a particular Cloud Bigtable table. Users + can configure access to each Authorized + View independently from the table and + use the existing Data APIs to access the + subset of data. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GenerateConsistencyTokenRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance( - request, bigtable_table_admin.GenerateConsistencyTokenRequest - ): - request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetAuthorizedViewRequest): + request = bigtable_table_admin.GetAuthorizedViewRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: @@ -1458,9 +2189,7 @@ def generate_consistency_token( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[ - self._transport.generate_consistency_token - ] + rpc = self._transport._wrapped_methods[self._transport.get_authorized_view] # Certain fields should be provided within the metadata header; # add these here. @@ -1468,6 +2197,9 @@ def generate_consistency_token( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1479,87 +2211,132 @@ def generate_consistency_token( # Done; return the response. 
return response - def check_consistency( + def update_authorized_view( self, request: Optional[ - Union[bigtable_table_admin.CheckConsistencyRequest, dict] + Union[bigtable_table_admin.UpdateAuthorizedViewRequest, dict] ] = None, *, - name: Optional[str] = None, - consistency_token: Optional[str] = None, + authorized_view: Optional[table.AuthorizedView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.CheckConsistencyResponse: - r"""Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Updates an AuthorizedView in a table. - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - name (str): - Required. The unique name of the Table for which to - check replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. + .. code-block:: python - This corresponds to the ``name`` field + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateAuthorizedViewRequest( + ) + + # Make the request + operation = client.update_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]): + The request object. The request for + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): + Required. The AuthorizedView to update. The ``name`` in + ``authorized_view`` is used to identify the + AuthorizedView. AuthorizedView name must be in this format: + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + + This corresponds to the ``authorized_view`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - consistency_token (str): - Required. The token created using - GenerateConsistencyToken for the Table. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. A mask + specifying which fields in the AuthorizedView resource + should be updated. This mask is relative to the + AuthorizedView resource, not to the request message. A + field will be overwritten if it is in the mask. If + empty, all fields set in the request will be + overwritten.
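A hedged sketch of the update path, assuming the AuthorizedView already exists; only the fields named in the mask are overwritten, and all resource names are placeholders.

```python
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableTableAdminClient()

view = bigtable_admin_v2.AuthorizedView(
    name="projects/my-project/instances/my-instance"
    "/tables/my-table/authorizedViews/my-view",
    subset_view=bigtable_admin_v2.AuthorizedView.SubsetView(row_prefixes=[b"user#"]),
)
# Only fields listed in the mask are overwritten on the server.
mask = field_mask_pb2.FieldMask(paths=["subset_view"])

operation = client.update_authorized_view(authorized_view=view, update_mask=mask)
updated = operation.result()  # waits for the long-running operation to finish
```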
A special value ``*`` means to overwrite + all fields (including fields not set in the request). - This corresponds to the ``consistency_token`` field + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users + can configure access to each Authorized View + independently from the table and use the existing + Data APIs to access the subset of data. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, consistency_token]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [authorized_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CheckConsistencyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): - request = bigtable_table_admin.CheckConsistencyRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateAuthorizedViewRequest): + request = bigtable_table_admin.UpdateAuthorizedViewRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: - request.name = name - if consistency_token is not None: - request.consistency_token = consistency_token + if authorized_view is not None: + request.authorized_view = authorized_view + if update_mask is not None: + request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.check_consistency] + rpc = self._transport._wrapped_methods[self._transport.update_authorized_view] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata( + (("authorized_view.name", request.authorized_view.name),) + ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1568,128 +2345,98 @@ def check_consistency( metadata=metadata, ) + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.AuthorizedView, + metadata_type=bigtable_table_admin.UpdateAuthorizedViewMetadata, + ) + # Done; return the response. return response - def snapshot_table( + def delete_authorized_view( self, request: Optional[ - Union[bigtable_table_admin.SnapshotTableRequest, dict] + Union[bigtable_table_admin.DeleteAuthorizedViewRequest, dict] ] = None, *, name: Optional[str] = None, - cluster: Optional[str] = None, - snapshot_id: Optional[str] = None, - description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Permanently deletes a specified AuthorizedView. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAuthorizedViewRequest( + name="name_value", + ) - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + # Make the request + client.delete_authorized_view(request=request) Args: - request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]): The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] name (str): - Required. The unique name of the table to have the - snapshot taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. + Required. 
The unique name of the AuthorizedView to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (str): - Required. The name of the cluster where the snapshot - will be created in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - snapshot_id (str): - Required. The ID by which the new snapshot should be - referred to within the parent cluster, e.g., - ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - - This corresponds to the ``snapshot_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - description (str): - Description of the snapshot. - This corresponds to the ``description`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a - checkpoint for data restoration or a data source for - a new table. - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, cluster, snapshot_id, description]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.SnapshotTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): - request = bigtable_table_admin.SnapshotTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteAuthorizedViewRequest): + request = bigtable_table_admin.DeleteAuthorizedViewRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name - if cluster is not None: - request.cluster = cluster - if snapshot_id is not None: - request.snapshot_id = snapshot_id - if description is not None: - request.description = description # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.snapshot_table] + rpc = self._transport._wrapped_methods[self._transport.delete_authorized_view] # Certain fields should be provided within the metadata header; # add these here. @@ -1697,109 +2444,132 @@ def snapshot_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. - response = rpc( + rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Snapshot, - metadata_type=bigtable_table_admin.SnapshotTableMetadata, - ) - - # Done; return the response. - return response - - def get_snapshot( + def modify_column_families( self, - request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] + ] = None, *, name: Optional[str] = None, + modifications: Optional[ + MutableSequence[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + ] + ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Snapshot: - r"""Gets metadata information about the specified - snapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Table: + r"""Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
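The generated `modify_column_families` snippet that follows leaves `modifications` empty; as a more concrete sketch, this creates a single column family with a two-version GC rule (the family id, GC policy, and table name are illustrative):

```python
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

create_cf1 = bigtable_admin_v2.ModifyColumnFamiliesRequest.Modification(
    id="cf1",
    create=bigtable_admin_v2.ColumnFamily(
        gc_rule=bigtable_admin_v2.GcRule(max_num_versions=2),
    ),
)
table = client.modify_column_families(
    name="projects/my-project/instances/my-instance/tables/my-table",
    modifications=[create_cf1],  # applied atomically, in order
)
```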
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_modify_column_families(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ModifyColumnFamiliesRequest( + name="name_value", + ) + + # Make the request + response = client.modify_column_families(request=request) + + # Handle the response + print(response) Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): + request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] name (str): - Required. The unique name of the requested snapshot. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + Required. The unique name of the table whose families + should be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + Required. Modifications to be + atomically applied to the specified + table's families. Entries are applied in + order, meaning that earlier + modifications can be masked by later + ones (in the case of repeated updates to + the same family, for example). + + This corresponds to the ``modifications`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.types.Snapshot: - A snapshot of a table at a particular - time. A snapshot can be used as a - checkpoint for data restoration or a - data source for a new table. - - Note: This is a private alpha release of - Cloud Bigtable snapshots. This feature - is not currently available to most Cloud - Bigtable customers. This feature might - be changed in backward-incompatible ways - and is not recommended for production - use. It is not subject to any SLA or - deprecation policy. + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. 
Each table + is served using the resources of its + parent cluster. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, modifications] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): - request = bigtable_table_admin.GetSnapshotRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name + if modifications is not None: + request.modifications = modifications # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_snapshot] + rpc = self._transport._wrapped_methods[self._transport.modify_column_families] # Certain fields should be provided within the metadata header; # add these here. @@ -1807,6 +2577,9 @@ def get_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1818,156 +2591,133 @@ def get_snapshot( # Done; return the response. return response - def list_snapshots( + def drop_row_range( self, - request: Optional[ - Union[bigtable_table_admin.ListSnapshotsRequest, dict] - ] = None, + request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, *, - parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSnapshotsPager: - r"""Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_drop_row_range(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DropRowRangeRequest( + row_key_prefix=b'row_key_prefix_blob', + name="name_value", + ) - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (str): - Required. The unique name of the cluster for which - snapshots should be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list snapshots for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. + # Make the request + client.drop_row_range(request=request) - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - Iterating over this object will yield results and - resolve additional pages automatically. - + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListSnapshotsRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): - request = bigtable_table_admin.ListSnapshotsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): + request = bigtable_table_admin.DropRowRangeRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_snapshots] + rpc = self._transport._wrapped_methods[self._transport.drop_row_range] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. - response = rpc( + rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListSnapshotsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_snapshot( + def generate_consistency_token( self, request: Optional[ - Union[bigtable_table_admin.DeleteSnapshotRequest, dict] + Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict] ] = None, *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently deletes the specified snapshot. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
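`drop_row_range` exposes no flattened fields, so the request proto is always built explicitly; a minimal sketch with placeholder names:

```python
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

# Delete only the rows whose keys start with the given prefix.
request = bigtable_admin_v2.DropRowRangeRequest(
    name="projects/my-project/instances/my-instance/tables/my-table",
    row_key_prefix=b"user#",  # or delete_all_data_from_table=True to wipe the table
)
client.drop_row_range(request=request)  # returns None
```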
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_generate_consistency_token(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GenerateConsistencyTokenRequest( + name="name_value", + ) + + # Make the request + response = client.generate_consistency_token(request=request) + + # Handle the response + print(response) Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): + request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] name (str): - Required. The unique name of the snapshot to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + Required. The unique name of the Table for which to + create a consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1975,25 +2725,36 @@ def delete_snapshot( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): - request = bigtable_table_admin.DeleteSnapshotRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_table_admin.GenerateConsistencyTokenRequest + ): + request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: @@ -2001,7 +2762,9 @@ def delete_snapshot( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_snapshot] + rpc = self._transport._wrapped_methods[ + self._transport.generate_consistency_token + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2009,108 +2772,1427 @@ def delete_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. - rpc( + response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - def create_backup( + # Done; return the response. + return response + + def check_consistency( self, - request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, - *, + request: Optional[ + Union[bigtable_table_admin.CheckConsistencyRequest, dict] + ] = None, + *, + name: Optional[str] = None, + consistency_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_check_consistency(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Make the request + response = client.check_consistency(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + name (str): + Required. The unique name of the Table for which to + check replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + consistency_token (str): + Required. The token created using + GenerateConsistencyToken for the Table. 
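`generate_consistency_token` and `check_consistency` are designed to be used as a pair; a naive fixed-interval polling sketch, with a placeholder table name:

```python
import time

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
table_name = "projects/my-project/instances/my-instance/tables/my-table"

# Take a token after the writes of interest have been submitted...
token = client.generate_consistency_token(name=table_name).consistency_token

# ...then poll until replication has caught up to that point.
while not client.check_consistency(
    name=table_name, consistency_token=token
).consistent:
    time.sleep(5)
```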
+ + This corresponds to the ``consistency_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, consistency_token] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): + request = bigtable_table_admin.CheckConsistencyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if consistency_token is not None: + request.consistency_token = consistency_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.check_consistency] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def snapshot_table( + self, + request: Optional[ + Union[bigtable_table_admin.SnapshotTableRequest, dict] + ] = None, + *, + name: Optional[str] = None, + cluster: Optional[str] = None, + snapshot_id: Optional[str] = None, + description: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_snapshot_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + ) + + # Make the request + operation = client.snapshot_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the table to have the + snapshot taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (str): + Required. The name of the cluster where the snapshot + will be created in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + snapshot_id (str): + Required. The ID by which the new snapshot should be + referred to within the parent cluster, e.g., + ``mysnapshot`` of the form: + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. + + This corresponds to the ``snapshot_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + description (str): + Description of the snapshot. + This corresponds to the ``description`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a + checkpoint for data restoration or a data source for + a new table. + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. 
This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, cluster, snapshot_id, description] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): + request = bigtable_table_admin.SnapshotTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if cluster is not None: + request.cluster = cluster + if snapshot_id is not None: + request.snapshot_id = snapshot_id + if description is not None: + request.description = description + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.snapshot_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Snapshot, + metadata_type=bigtable_table_admin.SnapshotTableMetadata, + ) + + # Done; return the response. + return response + + def get_snapshot( + self, + request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Snapshot: + r"""Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
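Per the docstring above, table snapshots are a private alpha feature; a hedged sketch of the long-running call, with placeholder names:

```python
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

operation = client.snapshot_table(
    name="projects/my-project/instances/my-instance/tables/my-table",
    cluster="projects/my-project/instances/my-instance/clusters/my-cluster",
    snapshot_id="my-snapshot",
    description="pre-migration checkpoint",  # optional
)
snapshot = operation.result()  # a Snapshot message once the operation completes
```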
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSnapshotRequest( + name="name_value", + ) + + # Make the request + response = client.get_snapshot(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the requested snapshot. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): + request = bigtable_table_admin.GetSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. 
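+ # (Illustrative: for a snapshot name such as
+ # "projects/p/instances/i/clusters/c/snapshots/s", this attaches an
+ # "x-goog-request-params" header so the request is routed to that resource.)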
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_snapshots( + self, + request: Optional[ + Union[bigtable_table_admin.ListSnapshotsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListSnapshotsPager: + r"""Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_snapshots(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSnapshotsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_snapshots(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (str): + Required. The unique name of the cluster for which + snapshots should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): + request = bigtable_table_admin.ListSnapshotsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_snapshots] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSnapshotsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_snapshot( + self, + request: Optional[ + Union[bigtable_table_admin.DeleteSnapshotRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSnapshotRequest( + name="name_value", + ) + + # Make the request + client.delete_snapshot(request=request) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): + request = bigtable_table_admin.DeleteSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
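+ # (delete_snapshot has no response payload: the call returns None, and
+ # failures surface as google.api_core exceptions.)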
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def create_backup(
+ self,
+ request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ backup: Optional[table.Backup] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> operation.Operation:
+ r"""Starts creating a new Cloud Bigtable Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] can be
+ used to track creation of the backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.bigtable.admin.v2.Backup], if successful.
+ Cancelling the returned operation will stop the creation and
+ delete the backup.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_create_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.CreateBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ backup=backup,
+ )
+
+ # Make the request
+ operation = client.create_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]):
+ The request object. The request for
+ [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup].
+ parent (str):
+ Required. This must be one of the clusters in the
+ instance in which this table is located. The backup will
+ be stored in this cluster. Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (str):
+ Required. The id of the backup to be created. The
+ ``backup_id`` along with the parent ``parent`` are
+ combined as {parent}/backups/{backup_id} to create the
+ full backup name, of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in
+ length and match the regex ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup (google.cloud.bigtable_admin_v2.types.Backup):
+ Required. The backup to create.
+ This corresponds to the ``backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, backup_id, backup] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateBackupRequest): + request = bigtable_table_admin.CreateBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if backup is not None: + request.backup = backup + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + def get_backup( + self, + request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Backup: + r"""Gets metadata on a pending or completed Cloud + Bigtable Backup. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_get_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.GetBackupRequest(
+ name="name_value",
+ )
+
+ # Make the request
+ response = client.get_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]):
+ The request object. The request for
+ [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup].
+ name (str):
+ Required. Name of the backup. Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.Backup:
+ A backup of a Cloud Bigtable table.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [name]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.GetBackupRequest):
+ request = bigtable_table_admin.GetBackupRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_backup]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Validate the universe domain.
+ self._validate_universe_domain()
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_backup(
+ self,
+ request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None,
+ *,
+ backup: Optional[table.Backup] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+ ) -> table.Backup:
+ r"""Updates a pending or completed Cloud Bigtable Backup.
+
+ .. code-block:: python
+
+ # This snippet has been automatically generated and should be regarded as a
+ # code template only.
+ # It will require modifications to work:
+ # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_update_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ backup = bigtable_admin_v2.Backup()
+ backup.source_table = "source_table_value"
+
+ request = bigtable_admin_v2.UpdateBackupRequest(
+ backup=backup,
+ )
+
+ # Make the request
+ response = client.update_backup(request=request)
+
+ # Handle the response
+ print(response)
+
+ Args:
+ request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]):
+ The request object. The request for
+ [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup].
+ backup (google.cloud.bigtable_admin_v2.types.Backup):
+ Required. The backup to update. ``backup.name`` and the
+ fields to be updated as specified by ``update_mask`` are
+ required. Other fields are ignored. Update is only
+ supported for the following fields:
+
+ - ``backup.expire_time``.
+
+ This corresponds to the ``backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. A mask specifying which fields (e.g.
+ ``expire_time``) in the Backup resource should be
+ updated. This mask is relative to the Backup resource,
+ not to the request message. The field mask must always
+ be specified; this prevents any future fields from being
+ erased accidentally by clients that do not know about
+ them.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.Backup:
+ A backup of a Cloud Bigtable table.
+ """
+ # Create or coerce a protobuf request object.
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [backup, update_mask]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # - Use the request object if provided (there's no risk of modifying the input as
+ # there are no flattened fields), or create one.
+ if not isinstance(request, bigtable_table_admin.UpdateBackupRequest):
+ request = bigtable_table_admin.UpdateBackupRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
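+ # (Only arguments that were explicitly passed are copied onto the
+ # request; omitted arguments leave the request's fields untouched.)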
+ if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("backup.name", request.backup.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_backup( + self, + request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a pending or completed Cloud Bigtable backup. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteBackupRequest( + name="name_value", + ) + + # Make the request + client.delete_backup(request=request) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + name (str): + Required. Name of the backup to delete. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): + request = bigtable_table_admin.DeleteBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def list_backups( + self, + request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, + *, parent: Optional[str] = None, - backup_id: Optional[str] = None, - backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListBackupsPager: + r"""Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_backups(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListBackupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_backups(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + parent (str): + Required. The cluster to list backups from. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
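+
+ (Individual pages can also be consumed explicitly through the
+ pager's ``pages`` property.)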
+ + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListBackupsRequest): + request = bigtable_table_admin.ListBackupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_backups] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListBackupsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def _restore_table( + self, + request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: - r"""Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The + r"""Create a new table by restoring from a completed backup. The + returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_restore_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.RestoreTableRequest( + backup="backup_value", + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + operation = client._restore_table(request=request) + + print("Waiting for operation to complete...") - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]): - The request object. The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - parent (str): - Required. This must be one of the clusters in the - instance in which this table is located. The backup will - be stored in this cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. + response = operation.result() - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (str): - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are - combined as {parent}/backups/{backup_id} to create the - full backup name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + # Handle the response + print(response) - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to create. - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. + Args: + request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): + The request object. The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, backup_id, backup]) - if request is not None and has_flattened_params: - raise ValueError( - "If the `request` argument is set, then none of " - "the individual field arguments should be set." - ) - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.CreateBackupRequest): - request = bigtable_table_admin.CreateBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if backup is not None: - request.backup = backup + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.RestoreTableRequest): + request = bigtable_table_admin.RestoreTableRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_backup] + rpc = self._transport._wrapped_methods[self._transport.restore_table] # Certain fields should be provided within the metadata header; # add these here. @@ -2118,6 +4200,9 @@ def create_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2130,77 +4215,167 @@ def create_backup( response = operation.from_gapic( response, self._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CreateBackupMetadata, + table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, ) # Done; return the response. return response - def get_backup( + def copy_backup( self, - request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, + request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, *, - name: Optional[str] = None, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + source_backup: Optional[str] = None, + expire_time: Optional[timestamp_pb2.Timestamp] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Gets metadata on a pending or completed Cloud - Bigtable Backup. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service
+ # client as shown in:
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
+ from google.cloud import bigtable_admin_v2
+
+ def sample_copy_backup():
+ # Create a client
+ client = bigtable_admin_v2.BigtableTableAdminClient()
+
+ # Initialize request argument(s)
+ request = bigtable_admin_v2.CopyBackupRequest(
+ parent="parent_value",
+ backup_id="backup_id_value",
+ source_backup="source_backup_value",
+ )
+
+ # Make the request
+ operation = client.copy_backup(request=request)
+
+ print("Waiting for operation to complete...")
+
+ response = operation.result()
+
+ # Handle the response
+ print(response)

 Args:
- request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]):
+ request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]):
 The request object. The request for
- [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup].
- name (str):
- Required. Name of the backup. Values are of the form
- ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+ parent (str):
+ Required. The name of the destination cluster that will
+ contain the backup copy. The cluster must already exist.
+ Values are of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.

- This corresponds to the ``name`` field
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (str):
+ Required. The id of the new backup. The ``backup_id``
+ along with ``parent`` are combined as
+ {parent}/backups/{backup_id} to create the full backup
+ name, of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in
+ length and match the regex ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ source_backup (str):
+ Required. The source backup to be copied from. The
+ source backup needs to be in READY state for it to be
+ copied. Copying a copied backup is not allowed. Once
+ CopyBackup is in progress, the source backup cannot be
+ deleted or cleaned up on expiration until CopyBackup is
+ finished. Values are of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+
+ This corresponds to the ``source_backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Required. The expiration time of the copied
+ backup with microsecond granularity that must be at
+ least 6 hours and at most 30 days from the time the
+ request is received. Once the ``expire_time`` has
+ passed, Cloud Bigtable will delete the backup and free
+ the resources used by the backup.
+
+ This corresponds to the ``expire_time`` field
 on the ``request`` instance; if ``request`` is provided, this
 should not be set.
 retry (google.api_core.retry.Retry): Designation of what errors, if any,
 should be retried.
 timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata.
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, backup_id, source_backup, expire_time] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.GetBackupRequest): - request = bigtable_table_admin.GetBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CopyBackupRequest): + request = bigtable_table_admin.CopyBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: - request.name = name + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_backup] + rpc = self._transport._wrapped_methods[self._transport.copy_backup] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2209,93 +4384,147 @@ def get_backup( metadata=metadata, ) + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CopyBackupMetadata, + ) + # Done; return the response. 
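+ # (operation.result() blocks until the copy completes and returns the
+ # new Backup; copy progress is reported via CopyBackupMetadata.)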
return response - def update_backup( + def get_iam_policy( self, - request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, *, - backup: Optional[table.Backup] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Updates a pending or completed Cloud Bigtable Backup. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Gets the access control policy for a Bigtable + resource. Returns an empty policy if the resource exists + but does not have a policy set. - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only - supported for the following fields: + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) - - ``backup.expire_time``. + # Make the request + response = client.get_iam_policy(request=request) - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be - updated. This mask is relative to the Backup resource, - not to the request message. The field mask must always - be specified; this prevents any future fields from being - erased accidentally by clients that do not know about - them. + # Handle the response + print(response) - This corresponds to the ``update_mask`` field + Args: + request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): + The request object. Request message for ``GetIamPolicy`` method. + resource (str): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+ sent along with the request as metadata. Normally, each value must be of type `str`,
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
+ be of type `bytes`.

 Returns:
- google.cloud.bigtable_admin_v2.types.Backup:
- A backup of a Cloud Bigtable table.
+ google.iam.v1.policy_pb2.Policy:
+ An Identity and Access Management (IAM) policy, which specifies access
+ controls for Google Cloud resources.
+
+ A Policy is a collection of bindings. A binding binds
+ one or more members, or principals, to a single role.
+ Principals can be user accounts, service accounts,
+ Google groups, and domains (such as G Suite). A role
+ is a named list of permissions; each role can be an
+ IAM predefined role or a user-created custom role.
+
+ For some types of Google Cloud resources, a binding
+ can also specify a condition, which is a logical
+ expression that allows access to a resource only if
+ the expression evaluates to true. A condition can add
+ constraints based on attributes of the request, the
+ resource, or both. To learn which resources support
+ conditions in their IAM policies, see the [IAM
+ documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+ **JSON example:**
+
+ ``{ "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }``
+
+ **YAML example:**
+
+ ``bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3``
+
+ For a description of IAM and its features, see the
+ [IAM
+ documentation](https://cloud.google.com/iam/docs/).
+
 """
 # Create or coerce a protobuf request object.
- # Quick check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([backup, update_mask])
+ # - Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ flattened_params = [resource]
+ has_flattened_params = (
+ len([param for param in flattened_params if param is not None]) > 0
+ )
 if request is not None and has_flattened_params:
 raise ValueError(
 "If the `request` argument is set, then none of "
 "the individual field arguments should be set."
 )

- # Minor optimization to avoid making a copy if the user passes
- # in a bigtable_table_admin.UpdateBackupRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
- if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): - request = bigtable_table_admin.UpdateBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if backup is not None: - request.backup = backup - if update_mask is not None: - request.update_mask = update_mask + if isinstance(request, dict): + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + request = iam_policy_pb2.GetIamPolicyRequest(**request) + elif not request: + # Null request, just make one. + request = iam_policy_pb2.GetIamPolicyRequest() + if resource is not None: + request.resource = resource # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_backup] + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("backup.name", request.backup.name),) - ), + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2307,147 +4536,258 @@ def update_backup( # Done; return the response. return response - def delete_backup( + def set_iam_policy( self, - request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, *, - name: Optional[str] = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a pending or completed Cloud Bigtable backup. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Sets the access control policy on a Bigtable + resource. Replaces any existing policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - name (str): - Required. Name of the backup to delete. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): + The request object. Request message for ``SetIamPolicy`` method. 
+ resource (str): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. - This corresponds to the ``name`` field + This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](https://cloud.google.com/iam/docs/). + """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
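The widened `metadata` annotation reflects gRPC's binary-metadata convention: ordinary keys carry `str` values, while keys ending in `-bin` carry `bytes`. A sketch under that convention; `x-custom-trace-bin` is an invented key used purely for illustration:

```python
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

# Keys with the "-bin" suffix must map to bytes; all other values must be str.
extra_metadata = (
    ("x-custom-header", "text-value"),
    ("x-custom-trace-bin", b"\x01\x02\x03"),  # made-up binary key
)
policy = client.get_iam_policy(
    resource="projects/my-project/instances/my-instance/tables/my-table",
    metadata=extra_metadata,
)
```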
+ flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): - request = bigtable_table_admin.DeleteBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name + if isinstance(request, dict): + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + request = iam_policy_pb2.SetIamPolicyRequest(**request) + elif not request: + # Null request, just make one. + request = iam_policy_pb2.SetIamPolicyRequest() + if resource is not None: + request.resource = resource # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_backup] + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. - rpc( + response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - def list_backups( + # Done; return the response. + return response + + def test_iam_permissions( self, - request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, + request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, *, - parent: Optional[str] = None, + resource: Optional[str] = None, + permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBackupsPager: - r"""Lists Cloud Bigtable backups. Returns both completed - and pending backups. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified Bigtable resource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - parent (str): - Required. The cluster to list backups from. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list backups for all clusters - in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. + request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): + The request object. Request message for ``TestIamPermissions`` method. + resource (str): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. - This corresponds to the ``parent`` field + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (MutableSequence[str]): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview <https://cloud.google.com/iam/docs/overview#permissions>`__. + + This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Iterating over this object will yield results and - resolve additional pages automatically. - + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request.
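`test_iam_permissions` echoes back only the subset of `permissions` the caller actually holds, so comparing against the requested set is the usual pattern. A sketch using standard Bigtable permission names:

```python
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
wanted = ["bigtable.tables.readRows", "bigtable.tables.mutateRows"]

response = client.test_iam_permissions(
    resource="projects/my-project/instances/my-instance/tables/my-table",
    permissions=wanted,
)
# response.permissions holds only the permissions the caller was granted.
missing = set(wanted) - set(response.permissions)
if missing:
    print(f"Caller lacks: {sorted(missing)}")
```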
+ flattened_params = [resource, permissions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListBackupsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.ListBackupsRequest): - request = bigtable_table_admin.ListBackupsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent + if isinstance(request, dict): + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + elif not request: + # Null request, just make one. + request = iam_policy_pb2.TestIamPermissionsRequest() + if resource is not None: + request.resource = resource + if permissions: + request.permissions.extend(permissions) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_backups] + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2456,65 +4796,133 @@ def list_backups( metadata=metadata, ) - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListBackupsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - # Done; return the response. return response - def restore_table( + def create_schema_bundle( self, - request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.CreateSchemaBundleRequest, dict] + ] = None, *, + parent: Optional[str] = None, + schema_bundle_id: Optional[str] = None, + schema_bundle: Optional[table.SchemaBundle] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: - r"""Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. + r"""Creates a new schema bundle in the specified table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.create_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) Args: - request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): + request (Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]): The request object. The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + parent (str): + Required. The parent resource where this schema bundle + will be created. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + schema_bundle_id (str): + Required. The unique ID to use for + the schema bundle, which will become the + final component of the schema bundle's + resource name. + + This corresponds to the ``schema_bundle_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): + Required. The schema bundle to + create. + + This corresponds to the ``schema_bundle`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` + A named collection of related schemas. """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.RestoreTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
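`create_schema_bundle` returns a long-running operation; the bundle only exists once the operation completes. A sketch with a bounded wait, assuming `proto_descriptors` carries a serialized descriptor set (for example produced by `protoc --descriptor_set_out`), elided here:

```python
from google.api_core import exceptions
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

schema_bundle = bigtable_admin_v2.SchemaBundle()
# Serialized descriptor bytes (elided); see the generated sample above.
schema_bundle.proto_schema.proto_descriptors = b"..."

operation = client.create_schema_bundle(
    parent="projects/my-project/instances/my-instance/tables/my-table",  # placeholder
    schema_bundle_id="my-bundle",
    schema_bundle=schema_bundle,
)
try:
    bundle = operation.result(timeout=300)  # block until the LRO finishes
except exceptions.GoogleAPICallError as exc:
    print(f"CreateSchemaBundle failed: {exc}")
```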
- if not isinstance(request, bigtable_table_admin.RestoreTableRequest): - request = bigtable_table_admin.RestoreTableRequest(request) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, schema_bundle_id, schema_bundle] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateSchemaBundleRequest): + request = bigtable_table_admin.CreateSchemaBundleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if schema_bundle_id is not None: + request.schema_bundle_id = schema_bundle_id + if schema_bundle is not None: + request.schema_bundle = schema_bundle # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.restore_table] + rpc = self._transport._wrapped_methods[self._transport.create_schema_bundle] # Certain fields should be provided within the metadata header; # add these here. @@ -2522,6 +4930,9 @@ def restore_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2534,129 +4945,137 @@ def restore_table( response = operation.from_gapic( response, self._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.RestoreTableMetadata, + table.SchemaBundle, + metadata_type=bigtable_table_admin.CreateSchemaBundleMetadata, ) # Done; return the response. return response - def copy_backup( + def update_schema_bundle( self, - request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.UpdateSchemaBundleRequest, dict] + ] = None, *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - source_backup: Optional[str] = None, - expire_time: Optional[timestamp_pb2.Timestamp] = None, + schema_bundle: Optional[table.SchemaBundle] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: - r"""Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. + r"""Updates a schema bundle in the specified table. - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]): - The request object. The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - parent (str): - Required. The name of the destination cluster that will - contain the backup copy. The cluster must already - exists. Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. + .. 
code-block:: python - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (str): - Required. The id of the new backup. The ``backup_id`` - along with ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup - name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_backup (str): - Required. The source backup to be copied from. The - source backup needs to be in READY state for it to be - copied. Copying a copied backup is not allowed. Once - CopyBackup is in progress, the source backup cannot be - deleted or cleaned up on expiration until CopyBackup is - finished. Values are of the form: - ``projects//instances//clusters//backups/``. + request = bigtable_admin_v2.UpdateSchemaBundleRequest( + schema_bundle=schema_bundle, + ) - This corresponds to the ``source_backup`` field + # Make the request + operation = client.update_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]): + The request object. The request for + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): + Required. The schema bundle to update. + + The schema bundle's ``name`` field is used to identify + the schema bundle to update. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + + This corresponds to the ``schema_bundle`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. Required. The expiration time of the copied - backup with microsecond granularity that must be at - least 6 hours and at most 30 days from the time the - request is received. Once the ``expire_time`` has - passed, Cloud Bigtable will delete the backup and free - the resources used by the backup. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to + update. - This corresponds to the ``expire_time`` field + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. + :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` + A named collection of related schemas. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [schema_bundle, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CopyBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.CopyBackupRequest): - request = bigtable_table_admin.CopyBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateSchemaBundleRequest): + request = bigtable_table_admin.UpdateSchemaBundleRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if source_backup is not None: - request.source_backup = source_backup - if expire_time is not None: - request.expire_time = expire_time + if schema_bundle is not None: + request.schema_bundle = schema_bundle + if update_mask is not None: + request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.copy_backup] + rpc = self._transport._wrapped_methods[self._transport.update_schema_bundle] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + gapic_v1.routing_header.to_grpc_metadata( + (("schema_bundle.name", request.schema_bundle.name),) + ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2669,108 +5088,114 @@ def copy_backup( response = operation.from_gapic( response, self._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CopyBackupMetadata, + table.SchemaBundle, + metadata_type=bigtable_table_admin.UpdateSchemaBundleMetadata, ) # Done; return the response. 
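For `update_schema_bundle`, `update_mask` names the `SchemaBundle` fields to write; fields not listed are left unchanged on the server. A sketch assuming `proto_schema` is the field being replaced:

```python
from google.protobuf import field_mask_pb2
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

schema_bundle = bigtable_admin_v2.SchemaBundle()
schema_bundle.name = (  # placeholder resource name
    "projects/my-project/instances/my-instance/tables/my-table/schemaBundles/my-bundle"
)
schema_bundle.proto_schema.proto_descriptors = b"..."  # new descriptors (elided)

operation = client.update_schema_bundle(
    schema_bundle=schema_bundle,
    # Only fields named in the mask are written; "proto_schema" is assumed here.
    update_mask=field_mask_pb2.FieldMask(paths=["proto_schema"]),
)
updated = operation.result()
```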
return response - def get_iam_policy( + def get_schema_bundle( self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.GetSchemaBundleRequest, dict] + ] = None, *, - resource: Optional[str] = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for a Table or Backup - resource. Returns an empty policy if the resource exists - but does not have a policy set. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.SchemaBundle: + r"""Gets metadata information about the specified schema + bundle. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSchemaBundleRequest( + name="name_value", + ) + + # Make the request + response = client.get_schema_bundle(request=request) + + # Handle the response + print(response) Args: - request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for ``GetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. + request (Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]): + The request object. The request for + [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. + name (str): + Required. The unique name of the schema bundle to + retrieve. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - This corresponds to the ``resource`` field + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. 
- - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). + google.cloud.bigtable_admin_v2.types.SchemaBundle: + A named collection of related + schemas. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.GetIamPolicyRequest() - if resource is not None: - request.resource = resource + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetSchemaBundleRequest): + request = bigtable_table_admin.GetSchemaBundleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + rpc = self._transport._wrapped_methods[self._transport.get_schema_bundle] # Certain fields should be provided within the metadata header; # add these here. 
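The `to_grpc_metadata` call above implements implicit routing: it URL-encodes the routing fields into the single `x-goog-request-params` header the backend uses to route the RPC. Roughly:

```python
from google.api_core import gapic_v1

# Produces the routing header tuple sent alongside the other request metadata.
header = gapic_v1.routing_header.to_grpc_metadata(
    (("name", "projects/p/instances/i/tables/t/schemaBundles/b"),)
)
# -> ('x-goog-request-params',
#     'name=projects%2Fp%2Finstances%2Fi%2Ftables%2Ft%2FschemaBundles%2Fb')
print(header)
```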
metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2782,100 +5207,111 @@ def get_iam_policy( # Done; return the response. return response - def set_iam_policy( + def list_schema_bundles( self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.ListSchemaBundlesRequest, dict] + ] = None, *, - resource: Optional[str] = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on a Table or Backup - resource. Replaces any existing policy. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListSchemaBundlesPager: + r"""Lists all schema bundles associated with the + specified table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_schema_bundles(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSchemaBundlesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_schema_bundles(request=request) + + # Handle the response + for response in page_result: + print(response) Args: - request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for ``SetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. + request (Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]): + The request object. The request for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + parent (str): + Required. The parent, which owns this collection of + schema bundles. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. - This corresponds to the ``resource`` field + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. 
- - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesPager: + The response for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). + Iterating over this object will yield results and + resolve additional pages automatically. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.SetIamPolicyRequest() - if resource is not None: - request.resource = resource + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
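`list_schema_bundles` returns a `ListSchemaBundlesPager` rather than a raw response; iterating it fetches further pages on demand, reusing the retry, timeout, and metadata captured at call time (see the pager changes later in this diff). A usage sketch:

```python
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
pager = client.list_schema_bundles(
    parent="projects/my-project/instances/my-instance/tables/my-table"  # placeholder
)

# Item-level iteration; additional ListSchemaBundles calls happen lazily.
# Page-level iteration is also available via the pager's `pages` property.
for bundle in pager:
    print(bundle.name)
```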
+ if not isinstance(request, bigtable_table_admin.ListSchemaBundlesRequest): + request = bigtable_table_admin.ListSchemaBundlesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + rpc = self._transport._wrapped_methods[self._transport.list_schema_bundles] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2884,97 +5320,120 @@ def set_iam_policy( metadata=metadata, ) + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSchemaBundlesPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + # Done; return the response. return response - def test_iam_permissions( + def delete_schema_bundle( self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, + request: Optional[ + Union[bigtable_table_admin.DeleteSchemaBundleRequest, dict] + ] = None, *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified Table or Backup resource. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a schema bundle in the specified table. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSchemaBundleRequest( + name="name_value", + ) - Args: - request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for ``TestIamPermissions`` method. - resource (str): - REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the - appropriate value for this field. + # Make the request + client.delete_schema_bundle(request=request) - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - permissions (MutableSequence[str]): - The set of permissions to check for the ``resource``. - Permissions with wildcards (such as '*' or 'storage.*') - are not allowed. For more information see `IAM - Overview `__. 
+ Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]): + The request object. The request for + [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. + name (str): + Required. The unique name of the schema bundle to + delete. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` - This corresponds to the ``permissions`` field + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: - Response message for TestIamPermissions method. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.TestIamPermissionsRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.TestIamPermissionsRequest() - if resource is not None: - request.resource = resource - if permissions: - request.permissions.extend(permissions) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteSchemaBundleRequest): + request = bigtable_table_admin.DeleteSchemaBundleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + rpc = self._transport._wrapped_methods[self._transport.delete_schema_bundle] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. - response = rpc( + rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) - # Done; return the response. 
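`delete_schema_bundle` returns `None` on success, so failures surface only as exceptions; treating `NotFound` as success keeps cleanup idempotent. A sketch:

```python
from google.api_core import exceptions
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
name = (  # placeholder resource name
    "projects/my-project/instances/my-instance/tables/my-table/schemaBundles/my-bundle"
)
try:
    client.delete_schema_bundle(name=name)  # no return value on success
except exceptions.NotFound:
    pass  # already deleted; acceptable for idempotent cleanup
```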
- return response - - def __enter__(self) -> "BigtableTableAdminClient": + def __enter__(self) -> "BaseBigtableTableAdminClient": return self def __exit__(self, type, value, traceback): @@ -2992,5 +5451,7 @@ def __exit__(self, type, value, traceback): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ -__all__ = ("BigtableTableAdminClient",) +__all__ = ("BaseBigtableTableAdminClient",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 331647b4c..e6d83ba63 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async from typing import ( Any, AsyncIterator, @@ -22,8 +25,18 @@ Tuple, Optional, Iterator, + Union, ) +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -52,7 +65,9 @@ def __init__( request: bigtable_table_admin.ListTablesRequest, response: bigtable_table_admin.ListTablesResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -63,12 +78,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
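A key behavioral change in the pager constructors below: `retry` and `timeout` are now stored on the pager and re-applied to every page fetch, not just the initial request. A sketch of the effect; the retry parameters are illustrative:

```python
from google.api_core import retry as retries
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
pager = client.list_tables(
    parent="projects/my-project/instances/my-instance",  # placeholder
    retry=retries.Retry(initial=0.25, maximum=8.0, multiplier=2.0),
    timeout=30.0,
)
# Every page request issued during this loop reuses the retry/timeout above.
for table in pager:
    print(table.name)
```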
""" self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -79,7 +101,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListTablesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Table]: @@ -114,7 +141,9 @@ def __init__( request: bigtable_table_admin.ListTablesRequest, response: bigtable_table_admin.ListTablesResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -125,12 +154,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -141,7 +177,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListTablesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Table]: @@ -156,6 +197,166 @@ def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) +class ListAuthorizedViewsPager: + """A pager for iterating through ``list_authorized_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``authorized_views`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAuthorizedViews`` requests and continue to iterate + through the ``authorized_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListAuthorizedViewsResponse], + request: bigtable_table_admin.ListAuthorizedViewsRequest, + response: bigtable_table_admin.ListAuthorizedViewsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_table_admin.ListAuthorizedViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[table.AuthorizedView]: + for page in self.pages: + yield from page.authorized_views + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListAuthorizedViewsAsyncPager: + """A pager for iterating through ``list_authorized_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``authorized_views`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAuthorizedViews`` requests and continue to iterate + through the ``authorized_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse] + ], + request: bigtable_table_admin.ListAuthorizedViewsRequest, + response: bigtable_table_admin.ListAuthorizedViewsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest): + The initial request object. 
+ response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_table_admin.ListAuthorizedViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[table.AuthorizedView]: + async def async_generator(): + async for page in self.pages: + for response in page.authorized_views: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + class ListSnapshotsPager: """A pager for iterating through ``list_snapshots`` requests. @@ -180,7 +381,9 @@ def __init__( request: bigtable_table_admin.ListSnapshotsRequest, response: bigtable_table_admin.ListSnapshotsResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -191,12 +394,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
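`__aiter__` above flattens paged responses into a single stream of items by way of a nested async generator. A dependency-free sketch of that pattern, with in-memory lists standing in for RPC pages:

```python
import asyncio
from typing import AsyncIterator, List


async def pages() -> AsyncIterator[List[int]]:
    # Stand-in for the RPC-backed `pages` property.
    for page in ([1, 2], [3, 4]):
        yield page


def flatten(page_iter: AsyncIterator[List[int]]) -> AsyncIterator[int]:
    # Same shape as __aiter__: outer loop over pages, inner loop over items.
    async def async_generator():
        async for page in page_iter:
            for item in page:
                yield item

    return async_generator()


async def main():
    async for item in flatten(pages()):
        print(item)  # 1, 2, 3, 4


asyncio.run(main())
```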
""" self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -207,7 +417,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListSnapshotsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Snapshot]: @@ -242,7 +457,9 @@ def __init__( request: bigtable_table_admin.ListSnapshotsRequest, response: bigtable_table_admin.ListSnapshotsResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -253,12 +470,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -269,7 +493,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListSnapshotsRespons yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Snapshot]: @@ -308,7 +537,9 @@ def __init__( request: bigtable_table_admin.ListBackupsRequest, response: bigtable_table_admin.ListBackupsResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -319,12 +550,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -335,7 +573,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListBackupsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Backup]: @@ -370,7 +613,9 @@ def __init__( request: bigtable_table_admin.ListBackupsRequest, response: bigtable_table_admin.ListBackupsResponse, *, - metadata: Sequence[Tuple[str, str]] = () + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -381,12 +626,19 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -397,7 +649,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListBackupsResponse] yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Backup]: @@ -410,3 +667,163 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSchemaBundlesPager: + """A pager for iterating through ``list_schema_bundles`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``schema_bundles`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSchemaBundles`` requests and continue to iterate + through the ``schema_bundles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListSchemaBundlesResponse], + request: bigtable_table_admin.ListSchemaBundlesRequest, + response: bigtable_table_admin.ListSchemaBundlesResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_table_admin.ListSchemaBundlesRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_table_admin.ListSchemaBundlesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[table.SchemaBundle]: + for page in self.pages: + yield from page.schema_bundles + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSchemaBundlesAsyncPager: + """A pager for iterating through ``list_schema_bundles`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``schema_bundles`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSchemaBundles`` requests and continue to iterate + through the ``schema_bundles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_table_admin.ListSchemaBundlesResponse] + ], + request: bigtable_table_admin.ListSchemaBundlesRequest, + response: bigtable_table_admin.ListSchemaBundlesResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_table_admin.ListSchemaBundlesRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_table_admin.ListSchemaBundlesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[table.SchemaBundle]: + async def async_generator(): + async for page in self.pages: + for response in page.schema_bundles: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst new file mode 100644 index 000000000..0e8f40ec3 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`BigtableTableAdminTransport` is the ABC for all transports. +- public child `BigtableTableAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `BigtableTableAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseBigtableTableAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `BigtableTableAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py index be4aa8d2a..e7621f781 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
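As a rough illustration of the inheritance structure described in the README above, GAPIC clients generally accept either a transport name or a constructed transport instance; whether the renamed base client is exported from this exact path is an assumption:

```python
# Illustrative only: the export path of the renamed client is an assumption.
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BaseBigtableTableAdminClient,
)

grpc_client = BaseBigtableTableAdminClient(transport="grpc")  # sync gRPC
rest_client = BaseBigtableTableAdminClient(transport="rest")  # sync REST
```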
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index c3cf01a96..8ad08df3f 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -39,6 +40,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableTableAdminTransport(abc.ABC): """Abstract transport class for BigtableTableAdmin.""" @@ -71,15 +75,16 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -96,6 +101,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -108,7 +115,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) @@ -134,6 +141,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -184,7 +195,7 @@ def _prep_wrapped_messages(self, client_info): ), self.delete_table: gapic_v1.method.wrap_method( self.delete_table, - default_timeout=60.0, + default_timeout=300.0, client_info=client_info, ), self.undelete_table: gapic_v1.method.wrap_method( @@ -192,6 +203,31 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.create_authorized_view: gapic_v1.method.wrap_method( + self.create_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.list_authorized_views: gapic_v1.method.wrap_method( + self.list_authorized_views, + default_timeout=None, + client_info=client_info, + ), + self.get_authorized_view: gapic_v1.method.wrap_method( + self.get_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.update_authorized_view: gapic_v1.method.wrap_method( + self.update_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_authorized_view: gapic_v1.method.wrap_method( + self.delete_authorized_view, + default_timeout=None, + client_info=client_info, + ), self.modify_column_families: gapic_v1.method.wrap_method( self.modify_column_families, default_timeout=300.0, @@ -227,9 +263,9 @@ def _prep_wrapped_messages(self, client_info): core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), - deadline=60.0, + deadline=3600.0, ), - default_timeout=60.0, + default_timeout=3600.0, client_info=client_info, ), self.snapshot_table: gapic_v1.method.wrap_method( @@ -269,7 +305,7 @@ def _prep_wrapped_messages(self, client_info): ), self.delete_snapshot: gapic_v1.method.wrap_method( self.delete_snapshot, - default_timeout=60.0, + default_timeout=300.0, client_info=client_info, ), self.create_backup: gapic_v1.method.wrap_method( @@ -299,7 +335,7 @@ def _prep_wrapped_messages(self, client_info): ), self.delete_backup: gapic_v1.method.wrap_method( self.delete_backup, - default_timeout=60.0, + default_timeout=300.0, client_info=client_info, ), self.list_backups: gapic_v1.method.wrap_method( @@ -362,6 +398,31 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.create_schema_bundle: gapic_v1.method.wrap_method( + self.create_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.update_schema_bundle: gapic_v1.method.wrap_method( + self.update_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.get_schema_bundle: gapic_v1.method.wrap_method( + self.get_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.list_schema_bundles: gapic_v1.method.wrap_method( + self.list_schema_bundles, + default_timeout=None, + client_info=client_info, + ), + self.delete_schema_bundle: gapic_v1.method.wrap_method( + self.delete_schema_bundle, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -444,6 +505,54 @@ def undelete_table( ]: raise NotImplementedError() + @property + def create_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.CreateAuthorizedViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_authorized_views( + self, + ) -> Callable[ + [bigtable_table_admin.ListAuthorizedViewsRequest], + Union[ + bigtable_table_admin.ListAuthorizedViewsResponse, + Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_authorized_view( 
+ self, + ) -> Callable[ + [bigtable_table_admin.GetAuthorizedViewRequest], + Union[table.AuthorizedView, Awaitable[table.AuthorizedView]], + ]: + raise NotImplementedError() + + @property + def update_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateAuthorizedViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.DeleteAuthorizedViewRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + @property def modify_column_families( self, @@ -621,6 +730,54 @@ def test_iam_permissions( ]: raise NotImplementedError() + @property + def create_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.CreateSchemaBundleRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def update_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateSchemaBundleRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.GetSchemaBundleRequest], + Union[table.SchemaBundle, Awaitable[table.SchemaBundle]], + ]: + raise NotImplementedError() + + @property + def list_schema_bundles( + self, + ) -> Callable[ + [bigtable_table_admin.ListSchemaBundlesRequest], + Union[ + bigtable_table_admin.ListSchemaBundlesResponse, + Awaitable[bigtable_table_admin.ListSchemaBundlesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.DeleteSchemaBundleRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index d765869cd..f8d1058c8 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
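The `_prep_wrapped_messages` changes in `base.py` above raise several per-RPC defaults (e.g. `delete_table` from 60s to 300s, and `check_consistency` to a 3600s retry deadline). A sketch of what one such `wrap_method` entry does; `check_consistency` below is a stand-in for the transport stub, and explicit `retry`/`timeout` arguments from the caller override these defaults:

```python
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries


def check_consistency(request, *, timeout=None, metadata=()):
    ...  # stand-in for the real transport stub


wrapped = gapic_v1.method.wrap_method(
    check_consistency,
    default_retry=retries.Retry(
        initial=1.0,
        maximum=60.0,
        multiplier=2,
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=3600.0,  # raised from 60.0 in this change
    ),
    default_timeout=3600.0,  # likewise raised from 60.0
    client_info=gapic_v1.client_info.ClientInfo(),
)
# wrapped(request) applies the defaults; wrapped(request, timeout=10.0)
# overrides them per call.
```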
# +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -22,8 +25,11 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -34,6 +40,80 @@ from google.protobuf import empty_pb2 # type: ignore from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata to a list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport): """gRPC backend transport for BigtableTableAdmin.
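The interceptor above is effectively a no-op unless `google.api_core.client_logging` is importable and DEBUG logging is enabled for this module's logger. A minimal sketch of switching it on during debugging, assuming the module path shown in this diff:

```python
import logging

logging.basicConfig()  # attach a root handler so records are emitted somewhere
logging.getLogger(
    "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.grpc"
).setLevel(logging.DEBUG)
# Subsequent unary calls through the transport now log request/response
# payloads and metadata via _LOGGER.debug(...).
```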
@@ -61,7 +141,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -75,20 +155,24 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -98,11 +182,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -129,9 +213,10 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
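`channel` may now be a callable as well as a concrete `grpc.Channel`; per the docstring above, a callable is invoked with the same arguments the transport would pass to `create_channel`. A hedged sketch of using that hook to inject extra channel options (the option shown is illustrative, and constructing the transport requires ambient credentials):

```python
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.grpc import (
    BigtableTableAdminGrpcTransport,
)


def make_channel(host, **kwargs):
    # Called by the transport in place of create_channel; tweak options here.
    kwargs.setdefault("options", []).append(
        ("grpc.max_receive_message_length", -1)
    )
    return BigtableTableAdminGrpcTransport.create_channel(host, **kwargs)


transport = BigtableTableAdminGrpcTransport(channel=make_channel)
```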
self._grpc_channel = channel self._ssl_channel_credentials = None @@ -170,7 +255,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -186,7 +273,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -207,9 +299,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -250,7 +343,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -276,7 +371,7 @@ def create_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table" not in self._stubs: - self._stubs["create_table"] = self.grpc_channel.unary_unary( + self._stubs["create_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", request_serializer=bigtable_table_admin.CreateTableRequest.serialize, response_deserializer=gba_table.Table.deserialize, @@ -313,7 +408,9 @@ def create_table_from_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table_from_snapshot" not in self._stubs: - self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + self._stubs[ + "create_table_from_snapshot" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -342,7 +439,7 @@ def list_tables( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_tables" not in self._stubs: - self._stubs["list_tables"] = self.grpc_channel.unary_unary( + self._stubs["list_tables"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", request_serializer=bigtable_table_admin.ListTablesRequest.serialize, response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, @@ -368,7 +465,7 @@ def get_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_table" not in self._stubs: - self._stubs["get_table"] = self.grpc_channel.unary_unary( + self._stubs["get_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", request_serializer=bigtable_table_admin.GetTableRequest.serialize, response_deserializer=table.Table.deserialize, @@ -394,7 +491,7 @@ def update_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_table" not in self._stubs: - self._stubs["update_table"] = self.grpc_channel.unary_unary( + self._stubs["update_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -421,7 +518,7 @@ def delete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_table" not in self._stubs: - self._stubs["delete_table"] = self.grpc_channel.unary_unary( + self._stubs["delete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -450,13 +547,152 @@ def undelete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undelete_table" not in self._stubs: - self._stubs["undelete_table"] = self.grpc_channel.unary_unary( + self._stubs["undelete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["undelete_table"] + @property + def create_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.CreateAuthorizedViewRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create authorized view method over gRPC. + + Creates a new AuthorizedView in a table. + + Returns: + Callable[[~.CreateAuthorizedViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_authorized_view" not in self._stubs: + self._stubs["create_authorized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", + request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_authorized_view"] + + @property + def list_authorized_views( + self, + ) -> Callable[ + [bigtable_table_admin.ListAuthorizedViewsRequest], + bigtable_table_admin.ListAuthorizedViewsResponse, + ]: + r"""Return a callable for the list authorized views method over gRPC. + + Lists all AuthorizedViews from a specific table. + + Returns: + Callable[[~.ListAuthorizedViewsRequest], + ~.ListAuthorizedViewsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_authorized_views" not in self._stubs: + self._stubs["list_authorized_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", + request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, + response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, + ) + return self._stubs["list_authorized_views"] + + @property + def get_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.GetAuthorizedViewRequest], table.AuthorizedView + ]: + r"""Return a callable for the get authorized view method over gRPC. + + Gets information from a specified AuthorizedView. + + Returns: + Callable[[~.GetAuthorizedViewRequest], + ~.AuthorizedView]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_authorized_view" not in self._stubs: + self._stubs["get_authorized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", + request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, + response_deserializer=table.AuthorizedView.deserialize, + ) + return self._stubs["get_authorized_view"] + + @property + def update_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateAuthorizedViewRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update authorized view method over gRPC. + + Updates an AuthorizedView in a table. + + Returns: + Callable[[~.UpdateAuthorizedViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_authorized_view" not in self._stubs: + self._stubs["update_authorized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", + request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_authorized_view"] + + @property + def delete_authorized_view( + self, + ) -> Callable[[bigtable_table_admin.DeleteAuthorizedViewRequest], empty_pb2.Empty]: + r"""Return a callable for the delete authorized view method over gRPC. + + Permanently deletes a specified AuthorizedView. + + Returns: + Callable[[~.DeleteAuthorizedViewRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_authorized_view" not in self._stubs: + self._stubs["delete_authorized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", + request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_authorized_view"] + @property def modify_column_families( self, @@ -480,7 +716,7 @@ def modify_column_families( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "modify_column_families" not in self._stubs: - self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + self._stubs["modify_column_families"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, response_deserializer=table.Table.deserialize, @@ -509,7 +745,7 @@ def drop_row_range( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "drop_row_range" not in self._stubs: - self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + self._stubs["drop_row_range"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -542,7 +778,9 @@ def generate_consistency_token( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "generate_consistency_token" not in self._stubs: - self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + self._stubs[ + "generate_consistency_token" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, @@ -574,7 +812,7 @@ def check_consistency( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "check_consistency" not in self._stubs: - self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + self._stubs["check_consistency"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, @@ -611,7 +849,7 @@ def snapshot_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "snapshot_table" not in self._stubs: - self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + self._stubs["snapshot_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -644,7 +882,7 @@ def get_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_snapshot" not in self._stubs: - self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["get_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, response_deserializer=table.Snapshot.deserialize, @@ -680,7 +918,7 @@ def list_snapshots( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_snapshots" not in self._stubs: - self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + self._stubs["list_snapshots"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, @@ -713,7 +951,7 @@ def delete_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_snapshot" not in self._stubs: - self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["delete_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -747,7 +985,7 @@ def create_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self.grpc_channel.unary_unary( + self._stubs["create_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -774,7 +1012,7 @@ def get_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self.grpc_channel.unary_unary( + self._stubs["get_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", request_serializer=bigtable_table_admin.GetBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -800,7 +1038,7 @@ def update_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self.grpc_channel.unary_unary( + self._stubs["update_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -826,7 +1064,7 @@ def delete_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -856,7 +1094,7 @@ def list_backups( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self.grpc_channel.unary_unary( + self._stubs["list_backups"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, @@ -874,7 +1112,7 @@ def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. @@ -889,7 +1127,7 @@ def restore_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "restore_table" not in self._stubs: - self._stubs["restore_table"] = self.grpc_channel.unary_unary( + self._stubs["restore_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -917,7 +1155,7 @@ def copy_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + self._stubs["copy_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -930,7 +1168,7 @@ def get_iam_policy( ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the get iam policy method over gRPC. - Gets the access control policy for a Table or Backup + Gets the access control policy for a Bigtable resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -945,7 +1183,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -958,7 +1196,7 @@ def set_iam_policy( ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the set iam policy method over gRPC. - Sets the access control policy on a Table or Backup + Sets the access control policy on a Bigtable resource. Replaces any existing policy. Returns: @@ -972,7 +1210,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -989,7 +1227,7 @@ def test_iam_permissions( r"""Return a callable for the test iam permissions method over gRPC. Returns permissions that the caller has on the - specified Table or Backup resource. + specified Bigtable resource. Returns: Callable[[~.TestIamPermissionsRequest], @@ -1002,15 +1240,154 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, ) return self._stubs["test_iam_permissions"] + @property + def create_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.CreateSchemaBundleRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create schema bundle method over gRPC. + + Creates a new schema bundle in the specified table. + + Returns: + Callable[[~.CreateSchemaBundleRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_schema_bundle" not in self._stubs: + self._stubs["create_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateSchemaBundle", + request_serializer=bigtable_table_admin.CreateSchemaBundleRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_schema_bundle"] + + @property + def update_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateSchemaBundleRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update schema bundle method over gRPC. + + Updates a schema bundle in the specified table. + + Returns: + Callable[[~.UpdateSchemaBundleRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_schema_bundle" not in self._stubs: + self._stubs["update_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateSchemaBundle", + request_serializer=bigtable_table_admin.UpdateSchemaBundleRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_schema_bundle"] + + @property + def get_schema_bundle( + self, + ) -> Callable[[bigtable_table_admin.GetSchemaBundleRequest], table.SchemaBundle]: + r"""Return a callable for the get schema bundle method over gRPC. + + Gets metadata information about the specified schema + bundle. + + Returns: + Callable[[~.GetSchemaBundleRequest], + ~.SchemaBundle]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_schema_bundle" not in self._stubs: + self._stubs["get_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle", + request_serializer=bigtable_table_admin.GetSchemaBundleRequest.serialize, + response_deserializer=table.SchemaBundle.deserialize, + ) + return self._stubs["get_schema_bundle"] + + @property + def list_schema_bundles( + self, + ) -> Callable[ + [bigtable_table_admin.ListSchemaBundlesRequest], + bigtable_table_admin.ListSchemaBundlesResponse, + ]: + r"""Return a callable for the list schema bundles method over gRPC. + + Lists all schema bundles associated with the + specified table. + + Returns: + Callable[[~.ListSchemaBundlesRequest], + ~.ListSchemaBundlesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_schema_bundles" not in self._stubs: + self._stubs["list_schema_bundles"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSchemaBundles", + request_serializer=bigtable_table_admin.ListSchemaBundlesRequest.serialize, + response_deserializer=bigtable_table_admin.ListSchemaBundlesResponse.deserialize, + ) + return self._stubs["list_schema_bundles"] + + @property + def delete_schema_bundle( + self, + ) -> Callable[[bigtable_table_admin.DeleteSchemaBundleRequest], empty_pb2.Empty]: + r"""Return a callable for the delete schema bundle method over gRPC. + + Deletes a schema bundle in the specified table. + + Returns: + Callable[[~.DeleteSchemaBundleRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_schema_bundle" not in self._stubs: + self._stubs["delete_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSchemaBundle", + request_serializer=bigtable_table_admin.DeleteSchemaBundleRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_schema_bundle"] + def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index b60a7351c..5017f17d0 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,16 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin @@ -35,6 +44,82 @@ from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableTableAdminGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": str(client_call_details.method), + "request": 
grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata to a list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): """gRPC AsyncIO backend transport for BigtableTableAdmin. @@ -74,9 +159,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -106,7 +191,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -120,21 +205,25 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service.
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. + channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -144,11 +233,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -175,9 +264,10 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None @@ -215,7 +305,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -231,7 +323,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -254,7 +352,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -283,7 +381,7 @@ def create_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
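Since the interceptor attached in ``__init__`` above only does work when DEBUG logging is enabled on the module logger, opting in at runtime is a standard-library call. A minimal sketch follows, assuming the module path shown in this diff; note that the structured payload travels in the log record's ``extra`` dict, so a custom handler or formatter is needed to surface fields such as ``rpcName`` and ``serviceName``.

```python
import logging

# Raise only the transport module's logger to DEBUG; the interceptor checks
# _LOGGER.isEnabledFor(logging.DEBUG) before serializing any payloads.
logging.basicConfig(level=logging.WARNING)
logging.getLogger(
    "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.grpc_asyncio"
).setLevel(logging.DEBUG)
```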
if "create_table" not in self._stubs: - self._stubs["create_table"] = self.grpc_channel.unary_unary( + self._stubs["create_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", request_serializer=bigtable_table_admin.CreateTableRequest.serialize, response_deserializer=gba_table.Table.deserialize, @@ -321,7 +419,9 @@ def create_table_from_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table_from_snapshot" not in self._stubs: - self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + self._stubs[ + "create_table_from_snapshot" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -350,7 +450,7 @@ def list_tables( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_tables" not in self._stubs: - self._stubs["list_tables"] = self.grpc_channel.unary_unary( + self._stubs["list_tables"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", request_serializer=bigtable_table_admin.ListTablesRequest.serialize, response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, @@ -376,7 +476,7 @@ def get_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_table" not in self._stubs: - self._stubs["get_table"] = self.grpc_channel.unary_unary( + self._stubs["get_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", request_serializer=bigtable_table_admin.GetTableRequest.serialize, response_deserializer=table.Table.deserialize, @@ -404,7 +504,7 @@ def update_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_table" not in self._stubs: - self._stubs["update_table"] = self.grpc_channel.unary_unary( + self._stubs["update_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -433,7 +533,7 @@ def delete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_table" not in self._stubs: - self._stubs["delete_table"] = self.grpc_channel.unary_unary( + self._stubs["delete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -462,13 +562,156 @@ def undelete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "undelete_table" not in self._stubs: - self._stubs["undelete_table"] = self.grpc_channel.unary_unary( + self._stubs["undelete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["undelete_table"] + @property + def create_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.CreateAuthorizedViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create authorized view method over gRPC. + + Creates a new AuthorizedView in a table. + + Returns: + Callable[[~.CreateAuthorizedViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_authorized_view" not in self._stubs: + self._stubs["create_authorized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", + request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_authorized_view"] + + @property + def list_authorized_views( + self, + ) -> Callable[ + [bigtable_table_admin.ListAuthorizedViewsRequest], + Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse], + ]: + r"""Return a callable for the list authorized views method over gRPC. + + Lists all AuthorizedViews from a specific table. + + Returns: + Callable[[~.ListAuthorizedViewsRequest], + Awaitable[~.ListAuthorizedViewsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_authorized_views" not in self._stubs: + self._stubs["list_authorized_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", + request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, + response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, + ) + return self._stubs["list_authorized_views"] + + @property + def get_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.GetAuthorizedViewRequest], Awaitable[table.AuthorizedView] + ]: + r"""Return a callable for the get authorized view method over gRPC. + + Gets information from a specified AuthorizedView. + + Returns: + Callable[[~.GetAuthorizedViewRequest], + Awaitable[~.AuthorizedView]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_authorized_view" not in self._stubs: + self._stubs["get_authorized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", + request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, + response_deserializer=table.AuthorizedView.deserialize, + ) + return self._stubs["get_authorized_view"] + + @property + def update_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateAuthorizedViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update authorized view method over gRPC. + + Updates an AuthorizedView in a table. + + Returns: + Callable[[~.UpdateAuthorizedViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_authorized_view" not in self._stubs: + self._stubs["update_authorized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", + request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_authorized_view"] + + @property + def delete_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.DeleteAuthorizedViewRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete authorized view method over gRPC. + + Permanently deletes a specified AuthorizedView. + + Returns: + Callable[[~.DeleteAuthorizedViewRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_authorized_view" not in self._stubs: + self._stubs["delete_authorized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", + request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_authorized_view"] + @property def modify_column_families( self, @@ -494,7 +737,7 @@ def modify_column_families( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "modify_column_families" not in self._stubs: - self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + self._stubs["modify_column_families"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, response_deserializer=table.Table.deserialize, @@ -525,7 +768,7 @@ def drop_row_range( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "drop_row_range" not in self._stubs: - self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + self._stubs["drop_row_range"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -558,7 +801,9 @@ def generate_consistency_token( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "generate_consistency_token" not in self._stubs: - self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + self._stubs[ + "generate_consistency_token" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, @@ -590,7 +835,7 @@ def check_consistency( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "check_consistency" not in self._stubs: - self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + self._stubs["check_consistency"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, @@ -627,7 +872,7 @@ def snapshot_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "snapshot_table" not in self._stubs: - self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + self._stubs["snapshot_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -660,7 +905,7 @@ def get_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_snapshot" not in self._stubs: - self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["get_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, response_deserializer=table.Snapshot.deserialize, @@ -696,7 +941,7 @@ def list_snapshots( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_snapshots" not in self._stubs: - self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + self._stubs["list_snapshots"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, @@ -731,7 +976,7 @@ def delete_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_snapshot" not in self._stubs: - self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["delete_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -767,7 +1012,7 @@ def create_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self.grpc_channel.unary_unary( + self._stubs["create_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -794,7 +1039,7 @@ def get_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self.grpc_channel.unary_unary( + self._stubs["get_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", request_serializer=bigtable_table_admin.GetBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -820,7 +1065,7 @@ def update_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self.grpc_channel.unary_unary( + self._stubs["update_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -848,7 +1093,7 @@ def delete_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -878,7 +1123,7 @@ def list_backups( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self.grpc_channel.unary_unary( + self._stubs["list_backups"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, @@ -898,7 +1143,7 @@ def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. @@ -913,7 +1158,7 @@ def restore_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "restore_table" not in self._stubs: - self._stubs["restore_table"] = self.grpc_channel.unary_unary( + self._stubs["restore_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -943,7 +1188,7 @@ def copy_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + self._stubs["copy_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -956,7 +1201,7 @@ def get_iam_policy( ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the get iam policy method over gRPC. - Gets the access control policy for a Table or Backup + Gets the access control policy for a Bigtable resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -971,7 +1216,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -984,7 +1229,7 @@ def set_iam_policy( ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the set iam policy method over gRPC. - Sets the access control policy on a Table or Backup + Sets the access control policy on a Bigtable resource. Replaces any existing policy. Returns: @@ -998,7 +1243,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1015,7 +1260,7 @@ def test_iam_permissions( r"""Return a callable for the test iam permissions method over gRPC. Returns permissions that the caller has on the - specified Table or Backup resource. + specified Bigtable resource. Returns: Callable[[~.TestIamPermissionsRequest], @@ -1028,15 +1273,449 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, ) return self._stubs["test_iam_permissions"] + @property + def create_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.CreateSchemaBundleRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create schema bundle method over gRPC. + + Creates a new schema bundle in the specified table. + + Returns: + Callable[[~.CreateSchemaBundleRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_schema_bundle" not in self._stubs: + self._stubs["create_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateSchemaBundle", + request_serializer=bigtable_table_admin.CreateSchemaBundleRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_schema_bundle"] + + @property + def update_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateSchemaBundleRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update schema bundle method over gRPC. + + Updates a schema bundle in the specified table. + + Returns: + Callable[[~.UpdateSchemaBundleRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_schema_bundle" not in self._stubs: + self._stubs["update_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateSchemaBundle", + request_serializer=bigtable_table_admin.UpdateSchemaBundleRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_schema_bundle"] + + @property + def get_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.GetSchemaBundleRequest], Awaitable[table.SchemaBundle] + ]: + r"""Return a callable for the get schema bundle method over gRPC. + + Gets metadata information about the specified schema + bundle. + + Returns: + Callable[[~.GetSchemaBundleRequest], + Awaitable[~.SchemaBundle]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_schema_bundle" not in self._stubs: + self._stubs["get_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle", + request_serializer=bigtable_table_admin.GetSchemaBundleRequest.serialize, + response_deserializer=table.SchemaBundle.deserialize, + ) + return self._stubs["get_schema_bundle"] + + @property + def list_schema_bundles( + self, + ) -> Callable[ + [bigtable_table_admin.ListSchemaBundlesRequest], + Awaitable[bigtable_table_admin.ListSchemaBundlesResponse], + ]: + r"""Return a callable for the list schema bundles method over gRPC. + + Lists all schema bundles associated with the + specified table. + + Returns: + Callable[[~.ListSchemaBundlesRequest], + Awaitable[~.ListSchemaBundlesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_schema_bundles" not in self._stubs: + self._stubs["list_schema_bundles"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSchemaBundles", + request_serializer=bigtable_table_admin.ListSchemaBundlesRequest.serialize, + response_deserializer=bigtable_table_admin.ListSchemaBundlesResponse.deserialize, + ) + return self._stubs["list_schema_bundles"] + + @property + def delete_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.DeleteSchemaBundleRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete schema bundle method over gRPC. + + Deletes a schema bundle in the specified table. + + Returns: + Callable[[~.DeleteSchemaBundleRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_schema_bundle" not in self._stubs: + self._stubs["delete_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSchemaBundle", + request_serializer=bigtable_table_admin.DeleteSchemaBundleRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_schema_bundle"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.create_table: self._wrap_method( + self.create_table, + default_timeout=300.0, + client_info=client_info, + ), + self.create_table_from_snapshot: self._wrap_method( + self.create_table_from_snapshot, + default_timeout=None, + client_info=client_info, + ), + self.list_tables: self._wrap_method( + self.list_tables, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_table: self._wrap_method( + self.get_table, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_table: self._wrap_method( + self.update_table, + default_timeout=None, + client_info=client_info, + ), + self.delete_table: self._wrap_method( + self.delete_table, + default_timeout=300.0, + client_info=client_info, + ), + self.undelete_table: self._wrap_method( + self.undelete_table, + default_timeout=None, + client_info=client_info, + ), + self.create_authorized_view: self._wrap_method( + self.create_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.list_authorized_views: self._wrap_method( + self.list_authorized_views, + default_timeout=None, + client_info=client_info, + ), + self.get_authorized_view: self._wrap_method( + self.get_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.update_authorized_view: self._wrap_method( + self.update_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_authorized_view: self._wrap_method( + self.delete_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.modify_column_families: self._wrap_method( + self.modify_column_families, + default_timeout=300.0, + client_info=client_info, + ), + self.drop_row_range: self._wrap_method( + self.drop_row_range, + default_timeout=3600.0, + client_info=client_info, + ), + self.generate_consistency_token: self._wrap_method( + self.generate_consistency_token, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.check_consistency: self._wrap_method( + self.check_consistency, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=3600.0, + ), + default_timeout=3600.0, + client_info=client_info, + ), + self.snapshot_table: self._wrap_method( + 
self.snapshot_table, + default_timeout=None, + client_info=client_info, + ), + self.get_snapshot: self._wrap_method( + self.get_snapshot, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_snapshots: self._wrap_method( + self.list_snapshots, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_snapshot: self._wrap_method( + self.delete_snapshot, + default_timeout=300.0, + client_info=client_info, + ), + self.create_backup: self._wrap_method( + self.create_backup, + default_timeout=60.0, + client_info=client_info, + ), + self.get_backup: self._wrap_method( + self.get_backup, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_backup: self._wrap_method( + self.update_backup, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_backup: self._wrap_method( + self.delete_backup, + default_timeout=300.0, + client_info=client_info, + ), + self.list_backups: self._wrap_method( + self.list_backups, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.restore_table: self._wrap_method( + self.restore_table, + default_timeout=60.0, + client_info=client_info, + ), + self.copy_backup: self._wrap_method( + self.copy_backup, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=60.0, + client_info=client_info, + ), + self.test_iam_permissions: self._wrap_method( + self.test_iam_permissions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_schema_bundle: self._wrap_method( + self.create_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.update_schema_bundle: self._wrap_method( + self.update_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.get_schema_bundle: self._wrap_method( + self.get_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.list_schema_bundles: self._wrap_method( + self.list_schema_bundles, + default_timeout=None, + client_info=client_info, + ), + self.delete_schema_bundle: self._wrap_method( + 
self.delete_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc_asyncio" __all__ = ("BigtableTableAdminGrpcAsyncIOTransport",) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 41b893eb7..6c3815f79 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,32 +13,26 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries from google.api_core import rest_helpers from google.api_core import rest_streaming -from google.api_core import path_template from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 + from requests import __version__ as requests_version import dataclasses -import re from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -48,18 +42,33 @@ from google.protobuf import empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from .base import ( - BigtableTableAdminTransport, - DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, -) +from .rest_base import _BaseBigtableTableAdminRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, grpc_version=None, - rest_version=requests_version, + rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = 
google.protobuf.__version__ + class BigtableTableAdminRestInterceptor: """Interceptor for BigtableTableAdmin. @@ -92,6 +101,14 @@ def post_copy_backup(self, response): logging.log(f"Received response: {response}") return response + def pre_create_authorized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_authorized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -100,6 +117,14 @@ def post_create_backup(self, response): logging.log(f"Received response: {response}") return response + def pre_create_schema_bundle(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_schema_bundle(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_table(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -116,10 +141,18 @@ def post_create_table_from_snapshot(self, response): logging.log(f"Received response: {response}") return response + def pre_delete_authorized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + def pre_delete_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata + def pre_delete_schema_bundle(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + def pre_delete_snapshot(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -140,6 +173,14 @@ def post_generate_consistency_token(self, response): logging.log(f"Received response: {response}") return response + def pre_get_authorized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_authorized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_get_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -156,6 +197,14 @@ def post_get_iam_policy(self, response): logging.log(f"Received response: {response}") return response + def pre_get_schema_bundle(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_schema_bundle(self, response): + logging.log(f"Received response: {response}") + return response + def pre_get_snapshot(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -172,6 +221,14 @@ def post_get_table(self, response): logging.log(f"Received response: {response}") return response + def pre_list_authorized_views(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_authorized_views(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_backups(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -180,6 +237,14 @@ def post_list_backups(self, response): logging.log(f"Received response: {response}") return response + def pre_list_schema_bundles(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_schema_bundles(self, response): + logging.log(f"Received response: {response}") + return 
response + def pre_list_snapshots(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -244,6 +309,14 @@ def post_undelete_table(self, response): logging.log(f"Received response: {response}") return response + def pre_update_authorized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_authorized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_update_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -252,6 +325,14 @@ def post_update_backup(self, response): logging.log(f"Received response: {response}") return response + def pre_update_schema_bundle(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_schema_bundle(self, response): + logging.log(f"Received response: {response}") + return response + def pre_update_table(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -261,7 +342,7 @@ def post_update_table(self, response): return response transport = BigtableTableAdminRestTransport(interceptor=MyCustomBigtableTableAdminInterceptor()) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) """ @@ -269,8 +350,11 @@ def post_update_table(self, response): def pre_check_consistency( self, request: bigtable_table_admin.CheckConsistencyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CheckConsistencyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CheckConsistencyRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for check_consistency Override in a subclass to manipulate the request or metadata @@ -283,17 +367,45 @@ def post_check_consistency( ) -> bigtable_table_admin.CheckConsistencyResponse: """Post-rpc interceptor for check_consistency - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_check_consistency_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_check_consistency` interceptor runs + before the `post_check_consistency_with_metadata` interceptor. """ return response + def post_check_consistency_with_metadata( + self, + response: bigtable_table_admin.CheckConsistencyResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CheckConsistencyResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for check_consistency + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_check_consistency_with_metadata` + interceptor in new development instead of the `post_check_consistency` interceptor. + When both interceptors are used, this `post_check_consistency_with_metadata` interceptor runs after the + `post_check_consistency` interceptor. The (possibly modified) response returned by + `post_check_consistency` will be passed to + `post_check_consistency_with_metadata`. 
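Putting the guidance above into practice, a subclass would override the ``*_with_metadata`` hook rather than the deprecated plain ``post_*`` hook, and return both values. A sketch, assuming the interceptor class is importable from the rest transport module shown in this diff:

```python
from typing import Sequence, Tuple, Union

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestInterceptor,
)
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin


class LoggingInterceptor(BigtableTableAdminRestInterceptor):
    def post_check_consistency_with_metadata(
        self,
        response: bigtable_table_admin.CheckConsistencyResponse,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ):
        # Inspect the response, then hand back both values unchanged.
        if not response.consistent:
            print("replication still catching up")
        return response, metadata
```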
+ """ + return response, metadata + def pre_copy_backup( self, request: bigtable_table_admin.CopyBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for copy_backup Override in a subclass to manipulate the request or metadata @@ -306,17 +418,92 @@ def post_copy_backup( ) -> operations_pb2.Operation: """Post-rpc interceptor for copy_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_copy_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_copy_backup` interceptor runs + before the `post_copy_backup_with_metadata` interceptor. + """ + return response + + def post_copy_backup_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for copy_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_copy_backup_with_metadata` + interceptor in new development instead of the `post_copy_backup` interceptor. + When both interceptors are used, this `post_copy_backup_with_metadata` interceptor runs after the + `post_copy_backup` interceptor. The (possibly modified) response returned by + `post_copy_backup` will be passed to + `post_copy_backup_with_metadata`. + """ + return response, metadata + + def pre_create_authorized_view( + self, + request: bigtable_table_admin.CreateAuthorizedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_authorized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_authorized_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_authorized_view + + DEPRECATED. Please use the `post_create_authorized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_authorized_view` interceptor runs + before the `post_create_authorized_view_with_metadata` interceptor. """ return response + def post_create_authorized_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_authorized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_create_authorized_view_with_metadata` + interceptor in new development instead of the `post_create_authorized_view` interceptor. + When both interceptors are used, this `post_create_authorized_view_with_metadata` interceptor runs after the + `post_create_authorized_view` interceptor. The (possibly modified) response returned by + `post_create_authorized_view` will be passed to + `post_create_authorized_view_with_metadata`. + """ + return response, metadata + def pre_create_backup( self, request: bigtable_table_admin.CreateBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CreateBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for create_backup Override in a subclass to manipulate the request or metadata @@ -329,17 +516,91 @@ def post_create_backup( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_create_backup` interceptor runs + before the `post_create_backup_with_metadata` interceptor. + """ + return response + + def post_create_backup_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_create_backup_with_metadata` + interceptor in new development instead of the `post_create_backup` interceptor. + When both interceptors are used, this `post_create_backup_with_metadata` interceptor runs after the + `post_create_backup` interceptor. The (possibly modified) response returned by + `post_create_backup` will be passed to + `post_create_backup_with_metadata`. + """ + return response, metadata + + def pre_create_schema_bundle( + self, + request: bigtable_table_admin.CreateSchemaBundleRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateSchemaBundleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_schema_bundle + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_schema_bundle( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_schema_bundle + + DEPRECATED. Please use the `post_create_schema_bundle_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_schema_bundle` interceptor runs + before the `post_create_schema_bundle_with_metadata` interceptor. 
""" return response + def post_create_schema_bundle_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_schema_bundle + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_create_schema_bundle_with_metadata` + interceptor in new development instead of the `post_create_schema_bundle` interceptor. + When both interceptors are used, this `post_create_schema_bundle_with_metadata` interceptor runs after the + `post_create_schema_bundle` interceptor. The (possibly modified) response returned by + `post_create_schema_bundle` will be passed to + `post_create_schema_bundle_with_metadata`. + """ + return response, metadata + def pre_create_table( self, request: bigtable_table_admin.CreateTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for create_table Override in a subclass to manipulate the request or metadata @@ -350,18 +611,42 @@ def pre_create_table( def post_create_table(self, response: gba_table.Table) -> gba_table.Table: """Post-rpc interceptor for create_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_table` interceptor runs + before the `post_create_table_with_metadata` interceptor. """ return response + def post_create_table_with_metadata( + self, + response: gba_table.Table, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gba_table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_create_table_with_metadata` + interceptor in new development instead of the `post_create_table` interceptor. + When both interceptors are used, this `post_create_table_with_metadata` interceptor runs after the + `post_create_table` interceptor. The (possibly modified) response returned by + `post_create_table` will be passed to + `post_create_table_with_metadata`. + """ + return response, metadata + def pre_create_table_from_snapshot( self, request: bigtable_table_admin.CreateTableFromSnapshotRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.CreateTableFromSnapshotRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.CreateTableFromSnapshotRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_table_from_snapshot @@ -375,17 +660,58 @@ def post_create_table_from_snapshot( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_table_from_snapshot - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_create_table_from_snapshot_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_table_from_snapshot` interceptor runs + before the `post_create_table_from_snapshot_with_metadata` interceptor. """ return response + def post_create_table_from_snapshot_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_table_from_snapshot + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_create_table_from_snapshot_with_metadata` + interceptor in new development instead of the `post_create_table_from_snapshot` interceptor. + When both interceptors are used, this `post_create_table_from_snapshot_with_metadata` interceptor runs after the + `post_create_table_from_snapshot` interceptor. The (possibly modified) response returned by + `post_create_table_from_snapshot` will be passed to + `post_create_table_from_snapshot_with_metadata`. + """ + return response, metadata + + def pre_delete_authorized_view( + self, + request: bigtable_table_admin.DeleteAuthorizedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_authorized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + def pre_delete_backup( self, request: bigtable_table_admin.DeleteBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_backup Override in a subclass to manipulate the request or metadata @@ -393,11 +719,29 @@ def pre_delete_backup( """ return request, metadata + def pre_delete_schema_bundle( + self, + request: bigtable_table_admin.DeleteSchemaBundleRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteSchemaBundleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_schema_bundle + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
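As a concrete illustration of the pre-rpc hooks, the sketch below (not part of this diff) appends a metadata pair before the request is sent. The header name is invented for the example, and the import path assumes the standard GAPIC layout.

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestInterceptor,
)


class TracingInterceptor(BigtableTableAdminRestInterceptor):
    def pre_delete_schema_bundle(self, request, metadata):
        # The (possibly modified) request and metadata are returned together
        # and used for the actual call.
        metadata = list(metadata) + [("x-example-trace-id", "delete-schema-bundle-1")]
        return request, metadata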
+ """ + return request, metadata + def pre_delete_snapshot( self, request: bigtable_table_admin.DeleteSnapshotRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteSnapshotRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteSnapshotRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_snapshot Override in a subclass to manipulate the request or metadata @@ -408,8 +752,10 @@ def pre_delete_snapshot( def pre_delete_table( self, request: bigtable_table_admin.DeleteTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for delete_table Override in a subclass to manipulate the request or metadata @@ -420,8 +766,11 @@ def pre_delete_table( def pre_drop_row_range( self, request: bigtable_table_admin.DropRowRangeRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DropRowRangeRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DropRowRangeRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for drop_row_range Override in a subclass to manipulate the request or metadata @@ -432,9 +781,10 @@ def pre_drop_row_range( def pre_generate_consistency_token( self, request: bigtable_table_admin.GenerateConsistencyTokenRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.GenerateConsistencyTokenRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.GenerateConsistencyTokenRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for generate_consistency_token @@ -448,17 +798,94 @@ def post_generate_consistency_token( ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: """Post-rpc interceptor for generate_consistency_token - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_generate_consistency_token_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_generate_consistency_token` interceptor runs + before the `post_generate_consistency_token_with_metadata` interceptor. + """ + return response + + def post_generate_consistency_token_with_metadata( + self, + response: bigtable_table_admin.GenerateConsistencyTokenResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GenerateConsistencyTokenResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for generate_consistency_token + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_generate_consistency_token_with_metadata` + interceptor in new development instead of the `post_generate_consistency_token` interceptor. + When both interceptors are used, this `post_generate_consistency_token_with_metadata` interceptor runs after the + `post_generate_consistency_token` interceptor. 
The (possibly modified) response returned by + `post_generate_consistency_token` will be passed to + `post_generate_consistency_token_with_metadata`. + """ + return response, metadata + + def pre_get_authorized_view( + self, + request: bigtable_table_admin.GetAuthorizedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_authorized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_authorized_view( + self, response: table.AuthorizedView + ) -> table.AuthorizedView: + """Post-rpc interceptor for get_authorized_view + + DEPRECATED. Please use the `post_get_authorized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_authorized_view` interceptor runs + before the `post_get_authorized_view_with_metadata` interceptor. """ return response + def post_get_authorized_view_with_metadata( + self, + response: table.AuthorizedView, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[table.AuthorizedView, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_authorized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_authorized_view_with_metadata` + interceptor in new development instead of the `post_get_authorized_view` interceptor. + When both interceptors are used, this `post_get_authorized_view_with_metadata` interceptor runs after the + `post_get_authorized_view` interceptor. The (possibly modified) response returned by + `post_get_authorized_view` will be passed to + `post_get_authorized_view_with_metadata`. + """ + return response, metadata + def pre_get_backup( self, request: bigtable_table_admin.GetBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_backup Override in a subclass to manipulate the request or metadata @@ -469,17 +896,40 @@ def pre_get_backup( def post_get_backup(self, response: table.Backup) -> table.Backup: """Post-rpc interceptor for get_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_backup` interceptor runs + before the `post_get_backup_with_metadata` interceptor. 
""" return response + def post_get_backup_with_metadata( + self, response: table.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[table.Backup, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_backup_with_metadata` + interceptor in new development instead of the `post_get_backup` interceptor. + When both interceptors are used, this `post_get_backup_with_metadata` interceptor runs after the + `post_get_backup` interceptor. The (possibly modified) response returned by + `post_get_backup` will be passed to + `post_get_backup_with_metadata`. + """ + return response, metadata + def pre_get_iam_policy( self, request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_iam_policy Override in a subclass to manipulate the request or metadata @@ -490,17 +940,91 @@ def pre_get_iam_policy( def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for get_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_get_iam_policy` interceptor runs + before the `post_get_iam_policy_with_metadata` interceptor. + """ + return response + + def post_get_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_iam_policy_with_metadata` + interceptor in new development instead of the `post_get_iam_policy` interceptor. + When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the + `post_get_iam_policy` interceptor. The (possibly modified) response returned by + `post_get_iam_policy` will be passed to + `post_get_iam_policy_with_metadata`. + """ + return response, metadata + + def pre_get_schema_bundle( + self, + request: bigtable_table_admin.GetSchemaBundleRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetSchemaBundleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_schema_bundle + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_schema_bundle( + self, response: table.SchemaBundle + ) -> table.SchemaBundle: + """Post-rpc interceptor for get_schema_bundle + + DEPRECATED. Please use the `post_get_schema_bundle_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_schema_bundle` interceptor runs + before the `post_get_schema_bundle_with_metadata` interceptor. """ return response + def post_get_schema_bundle_with_metadata( + self, + response: table.SchemaBundle, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[table.SchemaBundle, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_schema_bundle + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_schema_bundle_with_metadata` + interceptor in new development instead of the `post_get_schema_bundle` interceptor. + When both interceptors are used, this `post_get_schema_bundle_with_metadata` interceptor runs after the + `post_get_schema_bundle` interceptor. The (possibly modified) response returned by + `post_get_schema_bundle` will be passed to + `post_get_schema_bundle_with_metadata`. + """ + return response, metadata + def pre_get_snapshot( self, request: bigtable_table_admin.GetSnapshotRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_snapshot Override in a subclass to manipulate the request or metadata @@ -511,17 +1035,42 @@ def pre_get_snapshot( def post_get_snapshot(self, response: table.Snapshot) -> table.Snapshot: """Post-rpc interceptor for get_snapshot - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_snapshot_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_snapshot` interceptor runs + before the `post_get_snapshot_with_metadata` interceptor. """ return response + def post_get_snapshot_with_metadata( + self, + response: table.Snapshot, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[table.Snapshot, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_snapshot + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_snapshot_with_metadata` + interceptor in new development instead of the `post_get_snapshot` interceptor. + When both interceptors are used, this `post_get_snapshot_with_metadata` interceptor runs after the + `post_get_snapshot` interceptor. The (possibly modified) response returned by + `post_get_snapshot` will be passed to + `post_get_snapshot_with_metadata`. 
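When both hooks are defined, they run in the order described here. A sketch that makes the ordering visible (not part of this diff; `audit_log` is a stand-in for real instrumentation, and the import path assumes the standard GAPIC layout):

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestInterceptor,
)

audit_log: list = []


class ChainedInterceptor(BigtableTableAdminRestInterceptor):
    def post_get_snapshot(self, response):
        # Deprecated hook: runs first; its return value becomes the
        # `response` argument of the hook below.
        audit_log.append(("post_get_snapshot", response.name))
        return response

    def post_get_snapshot_with_metadata(self, response, metadata):
        # New hook: runs second, receiving the response returned above.
        audit_log.append(("post_get_snapshot_with_metadata", len(metadata)))
        return response, metadata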
+ """ + return response, metadata + def pre_get_table( self, request: bigtable_table_admin.GetTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_table Override in a subclass to manipulate the request or metadata @@ -532,17 +1081,92 @@ def pre_get_table( def post_get_table(self, response: table.Table) -> table.Table: """Post-rpc interceptor for get_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_table` interceptor runs + before the `post_get_table_with_metadata` interceptor. """ return response + def post_get_table_with_metadata( + self, response: table.Table, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_table_with_metadata` + interceptor in new development instead of the `post_get_table` interceptor. + When both interceptors are used, this `post_get_table_with_metadata` interceptor runs after the + `post_get_table` interceptor. The (possibly modified) response returned by + `post_get_table` will be passed to + `post_get_table_with_metadata`. + """ + return response, metadata + + def pre_list_authorized_views( + self, + request: bigtable_table_admin.ListAuthorizedViewsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListAuthorizedViewsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_authorized_views + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_authorized_views( + self, response: bigtable_table_admin.ListAuthorizedViewsResponse + ) -> bigtable_table_admin.ListAuthorizedViewsResponse: + """Post-rpc interceptor for list_authorized_views + + DEPRECATED. Please use the `post_list_authorized_views_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_list_authorized_views` interceptor runs + before the `post_list_authorized_views_with_metadata` interceptor. + """ + return response + + def post_list_authorized_views_with_metadata( + self, + response: bigtable_table_admin.ListAuthorizedViewsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListAuthorizedViewsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_authorized_views + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_list_authorized_views_with_metadata` + interceptor in new development instead of the `post_list_authorized_views` interceptor. + When both interceptors are used, this `post_list_authorized_views_with_metadata` interceptor runs after the + `post_list_authorized_views` interceptor. The (possibly modified) response returned by + `post_list_authorized_views` will be passed to + `post_list_authorized_views_with_metadata`. + """ + return response, metadata + def pre_list_backups( self, request: bigtable_table_admin.ListBackupsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_backups Override in a subclass to manipulate the request or metadata @@ -555,17 +1179,98 @@ def post_list_backups( ) -> bigtable_table_admin.ListBackupsResponse: """Post-rpc interceptor for list_backups - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_backups_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_list_backups` interceptor runs + before the `post_list_backups_with_metadata` interceptor. + """ + return response + + def post_list_backups_with_metadata( + self, + response: bigtable_table_admin.ListBackupsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListBackupsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_backups + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_backups_with_metadata` + interceptor in new development instead of the `post_list_backups` interceptor. + When both interceptors are used, this `post_list_backups_with_metadata` interceptor runs after the + `post_list_backups` interceptor. The (possibly modified) response returned by + `post_list_backups` will be passed to + `post_list_backups_with_metadata`. + """ + return response, metadata + + def pre_list_schema_bundles( + self, + request: bigtable_table_admin.ListSchemaBundlesRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSchemaBundlesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_schema_bundles + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_schema_bundles( + self, response: bigtable_table_admin.ListSchemaBundlesResponse + ) -> bigtable_table_admin.ListSchemaBundlesResponse: + """Post-rpc interceptor for list_schema_bundles + + DEPRECATED. Please use the `post_list_schema_bundles_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_schema_bundles` interceptor runs + before the `post_list_schema_bundles_with_metadata` interceptor. 
""" return response + def post_list_schema_bundles_with_metadata( + self, + response: bigtable_table_admin.ListSchemaBundlesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSchemaBundlesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_schema_bundles + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_schema_bundles_with_metadata` + interceptor in new development instead of the `post_list_schema_bundles` interceptor. + When both interceptors are used, this `post_list_schema_bundles_with_metadata` interceptor runs after the + `post_list_schema_bundles` interceptor. The (possibly modified) response returned by + `post_list_schema_bundles` will be passed to + `post_list_schema_bundles_with_metadata`. + """ + return response, metadata + def pre_list_snapshots( self, request: bigtable_table_admin.ListSnapshotsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListSnapshotsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSnapshotsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_snapshots Override in a subclass to manipulate the request or metadata @@ -578,17 +1283,45 @@ def post_list_snapshots( ) -> bigtable_table_admin.ListSnapshotsResponse: """Post-rpc interceptor for list_snapshots - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_snapshots_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_snapshots` interceptor runs + before the `post_list_snapshots_with_metadata` interceptor. """ return response + def post_list_snapshots_with_metadata( + self, + response: bigtable_table_admin.ListSnapshotsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSnapshotsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_snapshots + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_snapshots_with_metadata` + interceptor in new development instead of the `post_list_snapshots` interceptor. + When both interceptors are used, this `post_list_snapshots_with_metadata` interceptor runs after the + `post_list_snapshots` interceptor. The (possibly modified) response returned by + `post_list_snapshots` will be passed to + `post_list_snapshots_with_metadata`. 
+ """ + return response, metadata + def pre_list_tables( self, request: bigtable_table_admin.ListTablesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_tables Override in a subclass to manipulate the request or metadata @@ -601,18 +1334,44 @@ def post_list_tables( ) -> bigtable_table_admin.ListTablesResponse: """Post-rpc interceptor for list_tables - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_tables_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_tables` interceptor runs + before the `post_list_tables_with_metadata` interceptor. """ return response + def post_list_tables_with_metadata( + self, + response: bigtable_table_admin.ListTablesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListTablesResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_tables + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_tables_with_metadata` + interceptor in new development instead of the `post_list_tables` interceptor. + When both interceptors are used, this `post_list_tables_with_metadata` interceptor runs after the + `post_list_tables` interceptor. The (possibly modified) response returned by + `post_list_tables` will be passed to + `post_list_tables_with_metadata`. + """ + return response, metadata + def pre_modify_column_families( self, request: bigtable_table_admin.ModifyColumnFamiliesRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.ModifyColumnFamiliesRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.ModifyColumnFamiliesRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for modify_column_families @@ -624,17 +1383,41 @@ def pre_modify_column_families( def post_modify_column_families(self, response: table.Table) -> table.Table: """Post-rpc interceptor for modify_column_families - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_modify_column_families_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_modify_column_families` interceptor runs + before the `post_modify_column_families_with_metadata` interceptor. """ return response + def post_modify_column_families_with_metadata( + self, response: table.Table, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for modify_column_families + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. 
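For this REST transport, the `metadata` passed to the `_with_metadata` hooks is assembled from the HTTP response headers (see the `response_metadata = [(k, str(v)) for k, v in response.headers.items()]` lines in the method implementations later in this diff). Below is a sketch that reads one of those headers; the header name is illustrative, and the import path assumes the standard GAPIC layout.

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestInterceptor,
)


class HeaderInspectingInterceptor(BigtableTableAdminRestInterceptor):
    def post_modify_column_families_with_metadata(self, response, metadata):
        # Header casing can vary, so normalize keys before the lookup.
        headers = {k.lower(): v for k, v in metadata}
        timing = headers.get("server-timing")  # illustrative header name
        if timing:
            print(f"modify_column_families server-timing: {timing}")
        return response, metadata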
+ + We recommend only using this `post_modify_column_families_with_metadata` + interceptor in new development instead of the `post_modify_column_families` interceptor. + When both interceptors are used, this `post_modify_column_families_with_metadata` interceptor runs after the + `post_modify_column_families` interceptor. The (possibly modified) response returned by + `post_modify_column_families` will be passed to + `post_modify_column_families_with_metadata`. + """ + return response, metadata + def pre_restore_table( self, request: bigtable_table_admin.RestoreTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.RestoreTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.RestoreTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for restore_table Override in a subclass to manipulate the request or metadata @@ -647,17 +1430,42 @@ def post_restore_table( ) -> operations_pb2.Operation: """Post-rpc interceptor for restore_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_restore_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_restore_table` interceptor runs + before the `post_restore_table_with_metadata` interceptor. """ return response + def post_restore_table_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for restore_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_restore_table_with_metadata` + interceptor in new development instead of the `post_restore_table` interceptor. + When both interceptors are used, this `post_restore_table_with_metadata` interceptor runs after the + `post_restore_table` interceptor. The (possibly modified) response returned by + `post_restore_table` will be passed to + `post_restore_table_with_metadata`. + """ + return response, metadata + def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for set_iam_policy Override in a subclass to manipulate the request or metadata @@ -668,17 +1476,43 @@ def pre_set_iam_policy( def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for set_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_set_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_set_iam_policy` interceptor runs + before the `post_set_iam_policy_with_metadata` interceptor. 
""" return response + def post_set_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_set_iam_policy_with_metadata` + interceptor in new development instead of the `post_set_iam_policy` interceptor. + When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the + `post_set_iam_policy` interceptor. The (possibly modified) response returned by + `post_set_iam_policy` will be passed to + `post_set_iam_policy_with_metadata`. + """ + return response, metadata + def pre_snapshot_table( self, request: bigtable_table_admin.SnapshotTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.SnapshotTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.SnapshotTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for snapshot_table Override in a subclass to manipulate the request or metadata @@ -691,17 +1525,43 @@ def post_snapshot_table( ) -> operations_pb2.Operation: """Post-rpc interceptor for snapshot_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_snapshot_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_snapshot_table` interceptor runs + before the `post_snapshot_table_with_metadata` interceptor. """ return response + def post_snapshot_table_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for snapshot_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_snapshot_table_with_metadata` + interceptor in new development instead of the `post_snapshot_table` interceptor. + When both interceptors are used, this `post_snapshot_table_with_metadata` interceptor runs after the + `post_snapshot_table` interceptor. The (possibly modified) response returned by + `post_snapshot_table` will be passed to + `post_snapshot_table_with_metadata`. + """ + return response, metadata + def pre_test_iam_permissions( self, request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for test_iam_permissions Override in a subclass to manipulate the request or metadata @@ -714,17 +1574,46 @@ def post_test_iam_permissions( ) -> iam_policy_pb2.TestIamPermissionsResponse: """Post-rpc interceptor for test_iam_permissions - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_test_iam_permissions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_test_iam_permissions` interceptor runs + before the `post_test_iam_permissions_with_metadata` interceptor. """ return response + def post_test_iam_permissions_with_metadata( + self, + response: iam_policy_pb2.TestIamPermissionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_test_iam_permissions_with_metadata` + interceptor in new development instead of the `post_test_iam_permissions` interceptor. + When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the + `post_test_iam_permissions` interceptor. The (possibly modified) response returned by + `post_test_iam_permissions` will be passed to + `post_test_iam_permissions_with_metadata`. + """ + return response, metadata + def pre_undelete_table( self, request: bigtable_table_admin.UndeleteTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UndeleteTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UndeleteTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for undelete_table Override in a subclass to manipulate the request or metadata @@ -737,17 +1626,92 @@ def post_undelete_table( ) -> operations_pb2.Operation: """Post-rpc interceptor for undelete_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_undelete_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_undelete_table` interceptor runs + before the `post_undelete_table_with_metadata` interceptor. + """ + return response + + def post_undelete_table_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for undelete_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_undelete_table_with_metadata` + interceptor in new development instead of the `post_undelete_table` interceptor. + When both interceptors are used, this `post_undelete_table_with_metadata` interceptor runs after the + `post_undelete_table` interceptor. The (possibly modified) response returned by + `post_undelete_table` will be passed to + `post_undelete_table_with_metadata`. 
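Note that for LRO-backed RPCs such as `undelete_table`, these hooks receive the raw `operations_pb2.Operation`, not the eventual `Table` result. A sketch (not part of this diff; import paths assume the standard GAPIC layout and googleapis-common-protos):

from google.longrunning import operations_pb2

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestInterceptor,
)


class OperationLoggingInterceptor(BigtableTableAdminRestInterceptor):
    def post_undelete_table_with_metadata(
        self, response: operations_pb2.Operation, metadata
    ):
        # `response.name` identifies the long-running operation that the
        # client will poll to completion.
        print(f"undelete_table started operation: {response.name}")
        return response, metadata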
+ """ + return response, metadata + + def pre_update_authorized_view( + self, + request: bigtable_table_admin.UpdateAuthorizedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_authorized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_update_authorized_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_authorized_view + + DEPRECATED. Please use the `post_update_authorized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_authorized_view` interceptor runs + before the `post_update_authorized_view_with_metadata` interceptor. """ return response + def post_update_authorized_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_authorized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_update_authorized_view_with_metadata` + interceptor in new development instead of the `post_update_authorized_view` interceptor. + When both interceptors are used, this `post_update_authorized_view_with_metadata` interceptor runs after the + `post_update_authorized_view` interceptor. The (possibly modified) response returned by + `post_update_authorized_view` will be passed to + `post_update_authorized_view_with_metadata`. + """ + return response, metadata + def pre_update_backup( self, request: bigtable_table_admin.UpdateBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UpdateBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for update_backup Override in a subclass to manipulate the request or metadata @@ -758,17 +1722,89 @@ def pre_update_backup( def post_update_backup(self, response: table.Backup) -> table.Backup: """Post-rpc interceptor for update_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_update_backup` interceptor runs + before the `post_update_backup_with_metadata` interceptor. + """ + return response + + def post_update_backup_with_metadata( + self, response: table.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[table.Backup, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_update_backup_with_metadata` + interceptor in new development instead of the `post_update_backup` interceptor. + When both interceptors are used, this `post_update_backup_with_metadata` interceptor runs after the + `post_update_backup` interceptor. The (possibly modified) response returned by + `post_update_backup` will be passed to + `post_update_backup_with_metadata`. + """ + return response, metadata + + def pre_update_schema_bundle( + self, + request: bigtable_table_admin.UpdateSchemaBundleRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateSchemaBundleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_schema_bundle + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_update_schema_bundle( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_schema_bundle + + DEPRECATED. Please use the `post_update_schema_bundle_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_schema_bundle` interceptor runs + before the `post_update_schema_bundle_with_metadata` interceptor. """ return response + def post_update_schema_bundle_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_schema_bundle + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_update_schema_bundle_with_metadata` + interceptor in new development instead of the `post_update_schema_bundle` interceptor. + When both interceptors are used, this `post_update_schema_bundle_with_metadata` interceptor runs after the + `post_update_schema_bundle` interceptor. The (possibly modified) response returned by + `post_update_schema_bundle` will be passed to + `post_update_schema_bundle_with_metadata`. + """ + return response, metadata + def pre_update_table( self, request: bigtable_table_admin.UpdateTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for update_table Override in a subclass to manipulate the request or metadata @@ -781,12 +1817,35 @@ def post_update_table( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_table` interceptor runs + before the `post_update_table_with_metadata` interceptor. 
""" return response + def post_update_table_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_update_table_with_metadata` + interceptor in new development instead of the `post_update_table` interceptor. + When both interceptors are used, this `post_update_table_with_metadata` interceptor runs after the + `post_update_table` interceptor. The (possibly modified) response returned by + `post_update_table` will be passed to + `post_update_table_with_metadata`. + """ + return response, metadata + @dataclasses.dataclass class BigtableTableAdminRestStub: @@ -795,8 +1854,8 @@ class BigtableTableAdminRestStub: _interceptor: BigtableTableAdminRestInterceptor -class BigtableTableAdminRestTransport(BigtableTableAdminTransport): - """REST backend transport for BigtableTableAdmin. +class BigtableTableAdminRestTransport(_BaseBigtableTableAdminRestTransport): + """REST backend synchronous transport for BigtableTableAdmin. Service for creating, configuring, and deleting Cloud Bigtable tables. @@ -809,7 +1868,6 @@ class BigtableTableAdminRestTransport(BigtableTableAdminTransport): and call it. It sends JSON representations of protocol buffers over HTTP/1.1 - """ def __init__( @@ -831,16 +1889,17 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -863,21 +1922,12 @@ def __init__( # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError( - f"Unexpected hostname structure: {host}" - ) # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - super().__init__( host=host, credentials=credentials, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, api_audience=api_audience, ) self._session = AuthorizedSession( @@ -941,19 +1991,35 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: # Return the client from cache. 
return self._operations_client - class _CheckConsistency(BigtableTableAdminRestStub): + class _CheckConsistency( + _BaseBigtableTableAdminRestTransport._BaseCheckConsistency, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CheckConsistency") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CheckConsistency") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -961,7 +2027,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Call the check consistency method over HTTP. @@ -972,8 +2038,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
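Each new `_get_response` staticmethod pulls the HTTP verb out of the transcoded request and dispatches through `getattr(session, method)`. A simplified stand-in for those helpers, assuming a requests-compatible session (AuthorizedSession exposes `get`/`post`/`delete`/... just like `requests.Session`):

```python
from typing import Optional

import requests


def send_transcoded(
    session: requests.Session,
    host: str,
    transcoded: dict,
    params: list,
    body: Optional[str] = None,
    timeout: Optional[float] = None,
) -> requests.Response:
    # transcoded comes from HTTP-rule transcoding: it carries the bound URI
    # and the verb selected by the matched rule, e.g. "post".
    verb = transcoded["method"]
    url = "{host}{uri}".format(host=host, uri=transcoded["uri"])
    return getattr(session, verb)(
        url,
        params=params,
        data=body,
        headers={"Content-Type": "application/json"},
        timeout=timeout,
    )
```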
Returns: ~.bigtable_table_admin.CheckConsistencyResponse: @@ -982,50 +2050,62 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_http_options() + ) + request, metadata = self._interceptor.pre_check_consistency( request, metadata ) - pb_request = bigtable_table_admin.CheckConsistencyRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CheckConsistency", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CheckConsistency", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._CheckConsistency._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1038,22 +2118,65 @@ def __call__( pb_resp = bigtable_table_admin.CheckConsistencyResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_consistency(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_check_consistency_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.CheckConsistencyResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + 
"headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.check_consistency", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CheckConsistency", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _CopyBackup(BigtableTableAdminRestStub): + class _CopyBackup( + _BaseBigtableTableAdminRestTransport._BaseCopyBackup, BigtableTableAdminRestStub + ): def __hash__(self): - return hash("CopyBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CopyBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1061,7 +2184,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the copy backup method over HTTP. @@ -1072,8 +2195,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1083,49 +2208,218 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_copy_backup(request, metadata) - pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_copy_backup(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CopyBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CopyBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) + response = BigtableTableAdminRestTransport._CopyBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
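The `_Base*._get_http_options` / `_get_transcoded_request` / `_get_request_body_json` / `_get_query_params_json` helpers fold up what the removed left-hand side of this hunk did inline. Shown expanded for clarity, mirroring that removed code (`pb_request` stands for the protobuf form of the request, assumed in scope):

```python
from google.api_core import path_template
from google.protobuf import json_format

http_options = [{
    "method": "post",
    "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy",
    "body": "*",
}]
# Bind the URI template and split the message into body vs. query params.
transcoded = path_template.transcode(http_options, pb_request)
body = json_format.MessageToJson(transcoded["body"], use_integers_for_enums=True)
query_params_json = json_format.MessageToJson(
    transcoded["query_params"], use_integers_for_enums=True
)
```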
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_copy_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_copy_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.copy_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CopyBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateAuthorizedView( + _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.CreateAuthorizedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), data=body, ) + return response + + def __call__( + self, + request: bigtable_table_admin.CreateAuthorizedViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create authorized view method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateAuthorizedViewRequest): + The request object. The request for + [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
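The create/update RPCs come back as raw `operations_pb2.Operation` protos, parsed from the JSON body exactly as the hunks above show. A sketch of that parse plus a basic completion check (`response_json` is an assumed JSON string):

```python
from google.longrunning import operations_pb2
from google.protobuf import json_format

op = operations_pb2.Operation()
json_format.Parse(response_json, op, ignore_unknown_fields=True)
if op.done and op.HasField("error"):
    # google.rpc.Status carries the failure details.
    raise RuntimeError(f"operation failed: {op.error.message}")
```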
+ + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_authorized_view( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableTableAdminRestTransport._CreateAuthorizedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. @@ -1135,24 +2429,64 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_copy_backup(resp) + + resp = self._interceptor.post_create_authorized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_authorized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _CreateBackup(BigtableTableAdminRestStub): + class _CreateBackup( + _BaseBigtableTableAdminRestTransport._BaseCreateBackup, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CreateBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "backupId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CreateBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = 
transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1160,7 +2494,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create backup method over HTTP. @@ -1171,8 +2505,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -1182,49 +2518,218 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", - "body": "backup", - }, - ] - request, metadata = self._interceptor.pre_create_backup(request, metadata) - pb_request = bigtable_table_admin.CreateBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_create_backup(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request + response = 
BigtableTableAdminRestTransport._CreateBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateSchemaBundle( + _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.CreateSchemaBundle") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] headers = dict(metadata) headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), data=body, ) + return response + + def __call__( + self, + request: bigtable_table_admin.CreateSchemaBundleRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create schema bundle method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateSchemaBundleRequest): + The request object. The request for + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
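On any 4xx/5xx status every stub raises via `core_exceptions.from_http_response`, which maps the HTTP status onto a typed `GoogleAPICallError` subclass. A hedged call-site sketch (`client` and `create_backup_request` are hypothetical):

```python
from google.api_core import exceptions as core_exceptions

try:
    backup = client.create_backup(request=create_backup_request)
except core_exceptions.AlreadyExists:
    # 409 Conflict surfaces as a typed subclass.
    backup = None
except core_exceptions.GoogleAPICallError as exc:
    print(exc.code, exc.message)
    raise
```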
+ + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_schema_bundle( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateSchemaBundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateSchemaBundle", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableTableAdminRestTransport._CreateSchemaBundle._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. @@ -1234,22 +2739,64 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_backup(resp) + + resp = self._interceptor.post_create_schema_bundle(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_schema_bundle_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_schema_bundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateSchemaBundle", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _CreateTable(BigtableTableAdminRestStub): + class _CreateTable( + _BaseBigtableTableAdminRestTransport._BaseCreateTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CreateTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CreateTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + 
headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1257,7 +2804,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Call the create table method over HTTP. @@ -1268,8 +2815,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.gba_table.Table: @@ -1280,48 +2829,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_create_table(request, metadata) - pb_request = bigtable_table_admin.CreateTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_create_table(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - 
"{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._CreateTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1334,22 +2895,64 @@ def __call__( pb_resp = gba_table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gba_table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _CreateTableFromSnapshot(BigtableTableAdminRestStub): + class _CreateTableFromSnapshot( + _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CreateTableFromSnapshot") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CreateTableFromSnapshot") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1357,7 +2960,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create table from snapshot method over HTTP. @@ -1376,8 +2979,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1387,50 +2992,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_http_options() + ) + request, metadata = self._interceptor.pre_create_table_from_snapshot( request, metadata ) - pb_request = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateTableFromSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTableFromSnapshot", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._CreateTableFromSnapshot._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1441,77 +3060,140 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table_from_snapshot(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_table_from_snapshot_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": 
response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_table_from_snapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTableFromSnapshot", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _DeleteBackup(BigtableTableAdminRestStub): + class _DeleteAuthorizedView( + _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DeleteBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.DeleteAuthorizedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, - request: bigtable_table_admin.DeleteBackupRequest, + request: bigtable_table_admin.DeleteAuthorizedViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): - r"""Call the delete backup method over HTTP. + r"""Call the delete authorized view method over HTTP. Args: - request (~.bigtable_table_admin.DeleteBackupRequest): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + request (~.bigtable_table_admin.DeleteAuthorizedViewRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
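Note that the delete stubs differ in two ways visible above: their `_get_response` sends no `data=` body, and `__call__` returns nothing. After the interceptor and transcoding steps they only check the status code. A condensed sketch (`session`, `host`, `transcoded`, and `params` are assumed, as in the dispatch sketch earlier):

```python
from google.api_core import exceptions as core_exceptions

response = getattr(session, transcoded["method"])(
    "{host}{uri}".format(host=host, uri=transcoded["uri"]),
    headers={"Content-Type": "application/json"},
    params=params,
    timeout=30.0,
)
if response.status_code >= 400:
    raise core_exceptions.from_http_response(response)
# Nothing to parse: the stub falls off the end and returns None.
```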
""" - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", - }, - ] - request, metadata = self._interceptor.pre_delete_backup(request, metadata) - pb_request = bigtable_table_admin.DeleteBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_delete_authorized_view( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = ( + BigtableTableAdminRestTransport._DeleteAuthorizedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1519,19 +3201,254 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteSnapshot(BigtableTableAdminRestStub): + class _DeleteBackup( + _BaseBigtableTableAdminRestTransport._BaseDeleteBackup, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DeleteSnapshot") + return hash("BigtableTableAdminRestTransport.DeleteBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_table_admin.DeleteBackupRequest, + *, + 
retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete backup method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteBackupRequest): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_http_options() + ) - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + request, metadata = self._interceptor.pre_delete_backup(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_transcoded_request( + http_options, request + ) - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._DeleteBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
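Query parameters flow through `rest_helpers.flatten_query_params(..., strict=True)`, which flattens the nested mapping produced by transcoding into the flat pairs `requests` expects; `strict=True` tightens how leaf values are canonicalized for the query string. A small, hedged example:

```python
from google.api_core import rest_helpers

flat = rest_helpers.flatten_query_params(
    {"updateMask": "deletionProtection", "view": {"name": "FULL"}},
    strict=True,
)
# Nested keys come out dotted, e.g.
# [("updateMask", "deletionProtection"), ("view.name", "FULL")]  (illustrative)
```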
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteSchemaBundle( + _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.DeleteSchemaBundle") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_table_admin.DeleteSchemaBundleRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete schema bundle method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteSchemaBundleRequest): + The request object. The request for + [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_schema_bundle( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteSchemaBundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteSchemaBundle", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableTableAdminRestTransport._DeleteSchemaBundle._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
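Every request/response record in these stubs is gated on `CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(logging.DEBUG)`, so they cost nothing unless explicitly enabled. A sketch of switching them on with the standard logging module, assuming the library's module loggers live under the `google` namespace:

```python
import logging

logging.basicConfig(level=logging.WARNING)
# Opt only the admin client's loggers into DEBUG; the gated blocks above
# then emit the structured httpRequest/httpResponse records.
logging.getLogger("google.cloud.bigtable_admin_v2").setLevel(logging.DEBUG)
```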
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteSnapshot( + _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.DeleteSnapshot") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1539,7 +3456,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete snapshot method over HTTP. @@ -1557,43 +3474,61 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", - }, - ] - request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) - pb_request = bigtable_table_admin.DeleteSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteSnapshot", + "httpRequest": 
http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._DeleteSnapshot._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1601,19 +3536,34 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteTable(BigtableTableAdminRestStub): + class _DeleteTable( + _BaseBigtableTableAdminRestTransport._BaseDeleteTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DeleteTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.DeleteTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1621,7 +3571,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete table method over HTTP. @@ -1632,43 +3582,61 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
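The logging blocks serialize payloads defensively: proto-plus requests go through `type(request).to_json(request)`, plain protobufs through `json_format.MessageToJson`, and any failure downgrades the payload to `None` (hence the bare `except:` clauses in the generated code). The same pattern in isolation (`request_pb` is an assumed message):

```python
from google.protobuf import json_format

try:
    payload = json_format.MessageToJson(request_pb)
except Exception:  # the generated code uses a bare `except:` here
    payload = None
```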
""" - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/tables/*}", - }, - ] - request, metadata = self._interceptor.pre_delete_table(request, metadata) - pb_request = bigtable_table_admin.DeleteTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_delete_table(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._DeleteTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1676,19 +3644,35 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DropRowRange(BigtableTableAdminRestStub): + class _DropRowRange( + _BaseBigtableTableAdminRestTransport._BaseDropRowRange, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DropRowRange") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.DropRowRange") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1696,7 +3680,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the drop row range method over HTTP. @@ -1707,52 +3691,66 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_drop_row_range(request, metadata) - pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_drop_row_range(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DropRowRange", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DropRowRange", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = 
BigtableTableAdminRestTransport._DropRowRange._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1760,19 +3758,35 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _GenerateConsistencyToken(BigtableTableAdminRestStub): + class _GenerateConsistencyToken( + _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("GenerateConsistencyToken") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GenerateConsistencyToken") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1780,7 +3794,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Call the generate consistency token method over HTTP. @@ -1792,8 +3806,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
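Each static `_get_response` helper above follows the same shape: pull the HTTP verb off the transcoded request and dispatch through the shared session object. A standalone sketch of that dispatch pattern, using a plain `requests.Session` as a stand-in for the transport's authorized session (the URI below is shape-only):

import requests

# Hedged sketch of the verb dispatch used by the _get_response helpers.
session = requests.Session()
transcoded_request = {"method": "post", "uri": "/v2/some/resource:dropRowRange"}
send = getattr(session, transcoded_request["method"])  # e.g. session.post
# send(host + transcoded_request["uri"], headers=..., data=body) would then
# issue the request; no network call is made in this sketch.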
Returns: ~.bigtable_table_admin.GenerateConsistencyTokenResponse: @@ -1802,52 +3818,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_http_options() + ) + request, metadata = self._interceptor.pre_generate_consistency_token( request, metadata ) - pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( - request + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_transcoded_request( + http_options, request ) - transcoded_request = path_template.transcode(http_options, pb_request) - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GenerateConsistencyToken", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GenerateConsistencyToken", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._GenerateConsistencyToken._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1860,81 +3888,152 @@ def __call__( pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_consistency_token(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_generate_consistency_token_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + 
bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.generate_consistency_token", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GenerateConsistencyToken", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _GetBackup(BigtableTableAdminRestStub): + class _GetAuthorizedView( + _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("GetBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GetAuthorizedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, - request: bigtable_table_admin.GetBackupRequest, + request: bigtable_table_admin.GetAuthorizedViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Call the get backup method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.AuthorizedView: + r"""Call the get authorized view method over HTTP. Args: - request (~.bigtable_table_admin.GetBackupRequest): - The request object. The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + request (~.bigtable_table_admin.GetAuthorizedViewRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - ~.table.Backup: - A backup of a Cloud Bigtable table. + ~.table.AuthorizedView: + AuthorizedViews represent subsets of + a particular Cloud Bigtable table. Users + can configure access to each Authorized + View independently from the table and + use the existing Data APIs to access the + subset of data. 
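The response-hydration step above (`json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)`) deserializes the raw JSON body into the proto while tolerating fields the client's proto definition does not know about. A self-contained sketch using a well-known proto type:

from google.protobuf import descriptor_pb2, json_format

# Hedged sketch: unknown JSON fields are dropped instead of raising, so
# newer server fields do not break older clients.
msg = descriptor_pb2.FileDescriptorProto()
json_format.Parse('{"name": "a.proto", "someFutureField": 1}', msg, ignore_unknown_fields=True)
assert msg.name == "a.proto"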
+ """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", - }, - ] - request, metadata = self._interceptor.pre_get_backup(request, metadata) - pb_request = bigtable_table_admin.GetBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_get_authorized_view( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._GetAuthorizedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1943,26 +4042,213 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = table.Backup() - pb_resp = table.Backup.pb(resp) + resp = table.AuthorizedView() + pb_resp = table.AuthorizedView.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_backup(resp) + + resp = self._interceptor.post_get_authorized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_authorized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.AuthorizedView.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for 
google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _GetIamPolicy(BigtableTableAdminRestStub): + class _GetBackup( + _BaseBigtableTableAdminRestTransport._BaseGetBackup, BigtableTableAdminRestStub + ): def __hash__(self): - return hash("GetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GetBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_table_admin.GetBackupRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Backup: + r"""Call the get backup method over HTTP. + + Args: + request (~.bigtable_table_admin.GetBackupRequest): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.table.Backup: + A backup of a Cloud Bigtable table. 
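The logging blocks above guard payload serialization with a bare `except:` so that logging can never fail the RPC. Since the payload is best-effort anyway, a narrower clause keeps the same behavior without also swallowing `KeyboardInterrupt` and `SystemExit`; a sketch of the equivalent pattern (helper name hypothetical):

from google.protobuf import json_format

def best_effort_payload(message):
    # Hedged sketch of the best-effort serialization guard used in the
    # generated logging blocks, with a narrower exception clause.
    try:
        return json_format.MessageToJson(message)
    except Exception:
        return None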
+ """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_backup(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._GetBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Backup() + pb_resp = table.Backup.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Backup.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetIamPolicy( + _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.GetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1970,7 +4256,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> 
policy_pb2.Policy: r"""Call the get iam policy method over HTTP. @@ -1980,8 +4266,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.policy_pb2.Policy: @@ -2063,53 +4351,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate 
core_exceptions.GoogleAPICallError exception @@ -2122,22 +4417,213 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _GetSnapshot(BigtableTableAdminRestStub): + class _GetSchemaBundle( + _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("GetSnapshot") + return hash("BigtableTableAdminRestTransport.GetSchemaBundle") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + def __call__( + self, + request: bigtable_table_admin.GetSchemaBundleRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.SchemaBundle: + r"""Call the get schema bundle method over HTTP. - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + Args: + request (~.bigtable_table_admin.GetSchemaBundleRequest): + The request object. The request for + [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.table.SchemaBundle: + A named collection of related + schemas. 
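Every handler converts HTTP error statuses into the matching `google.api_core` exception class via `core_exceptions.from_http_response`. A sketch of that mapping with a fake response object (the function only needs a `requests.Response`-like shape, so the stand-in class here is hypothetical):

from google.api_core import exceptions as core_exceptions

class _FakeResponse:  # hypothetical stand-in for requests.Response
    status_code = 404
    headers = {}
    def json(self):
        return {"error": {"message": "table not found"}}

exc = core_exceptions.from_http_response(_FakeResponse())
print(type(exc).__name__)  # NotFound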
+ + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_schema_bundle( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetSchemaBundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSchemaBundle", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._GetSchemaBundle._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.SchemaBundle() + pb_resp = table.SchemaBundle.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_schema_bundle(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_schema_bundle_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.SchemaBundle.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_schema_bundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSchemaBundle", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetSnapshot( + _BaseBigtableTableAdminRestTransport._BaseGetSnapshot, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.GetSnapshot") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2145,7 +4631,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, 
str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Snapshot: r"""Call the get snapshot method over HTTP. @@ -2163,8 +4649,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.table.Snapshot: @@ -2184,39 +4672,55 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", - }, - ] - request, metadata = self._interceptor.pre_get_snapshot(request, metadata) - pb_request = bigtable_table_admin.GetSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_get_snapshot(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSnapshot", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._GetSnapshot._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2229,22 +4733,62 @@ def __call__( pb_resp = table.Snapshot.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_snapshot(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = 
self._interceptor.post_get_snapshot_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Snapshot.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_snapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSnapshot", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _GetTable(BigtableTableAdminRestStub): + class _GetTable( + _BaseBigtableTableAdminRestTransport._BaseGetTable, BigtableTableAdminRestStub + ): def __hash__(self): - return hash("GetTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GetTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2252,7 +4796,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Call the get table method over HTTP. @@ -2263,8 +4807,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
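The new `post_*_with_metadata` interceptor hooks above receive both the decoded response and the response headers (as a list of key/value pairs) and return the pair, so either can be adjusted. A hedged sketch of a custom hook in that style (the class name is illustrative; real hooks live on the transport's interceptor):

class _AuditHooks:  # hypothetical
    def post_get_snapshot_with_metadata(self, resp, response_metadata):
        # response_metadata arrives as [(header_name, header_value), ...]
        return resp, response_metadata  # pass both through unchanged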
Returns: ~.table.Table: @@ -2275,39 +4821,55 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/tables/*}", - }, - ] - request, metadata = self._interceptor.pre_get_table(request, metadata) - pb_request = bigtable_table_admin.GetTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetTable._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_get_table(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetTable._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._GetTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2320,83 +4882,146 @@ def __call__( pb_resp = table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ListBackups(BigtableTableAdminRestStub): + class _ListAuthorizedViews( + 
_BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("ListBackups") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.ListAuthorizedViews") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, - request: bigtable_table_admin.ListBackupsRequest, + request: bigtable_table_admin.ListAuthorizedViewsRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.ListBackupsResponse: - r"""Call the list backups method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_table_admin.ListAuthorizedViewsResponse: + r"""Call the list authorized views method over HTTP. Args: - request (~.bigtable_table_admin.ListBackupsRequest): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + request (~.bigtable_table_admin.ListAuthorizedViewsRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - ~.bigtable_table_admin.ListBackupsResponse: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
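All of the `CLIENT_LOGGING_SUPPORTED` blocks in this file are no-ops unless DEBUG logging is enabled for the module's logger. A sketch of switching it on, assuming the module loggers nest under the standard "google" logger hierarchy:

import logging

# Hedged sketch: enable DEBUG so the httpRequest/httpResponse extras
# built above are actually emitted.
logging.basicConfig()
logging.getLogger("google").setLevel(logging.DEBUG)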
+ ~.bigtable_table_admin.ListAuthorizedViewsResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", - }, - ] - request, metadata = self._interceptor.pre_list_backups(request, metadata) - pb_request = bigtable_table_admin.ListBackupsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_list_authorized_views( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListAuthorizedViews", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListAuthorizedViews", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = ( + BigtableTableAdminRestTransport._ListAuthorizedViews._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2405,26 +5030,373 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = bigtable_table_admin.ListBackupsResponse() - pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) + resp = bigtable_table_admin.ListAuthorizedViewsResponse() + pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_backups(resp) - return resp - - class _ListSnapshots(BigtableTableAdminRestStub): - def __hash__(self): - return hash("ListSnapshots") - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + resp = self._interceptor.post_list_authorized_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ 
= self._interceptor.post_list_authorized_views_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.ListAuthorizedViewsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_authorized_views", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListAuthorizedViews", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + class _ListBackups( + _BaseBigtableTableAdminRestTransport._BaseListBackups, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.ListBackups") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_table_admin.ListBackupsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_table_admin.ListBackupsResponse: + r"""Call the list backups method over HTTP. + + Args: + request (~.bigtable_table_admin.ListBackupsRequest): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.bigtable_table_admin.ListBackupsResponse: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
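The `rest_helpers.flatten_query_params(query_params, strict=True)` call used by every `_get_response` helper collapses nested query-param dicts into dotted key paths suitable for an HTTP query string. A short sketch of that flattening:

from google.api_core import rest_helpers

params = rest_helpers.flatten_query_params(
    {"pageSize": 10, "view": {"name": "v"}}, strict=True
)
# nested keys collapse to dotted paths, roughly: [("pageSize", 10), ("view.name", "v")];
# strict=True additionally normalizes values such as booleans to "true"/"false"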
+ + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_backups(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListBackups", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListBackups", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._ListBackups._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListBackupsResponse() + pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_backups(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_backups_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable_table_admin.ListBackupsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_backups", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListBackups", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListSchemaBundles( + _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.ListSchemaBundles") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_table_admin.ListSchemaBundlesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + 
timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_table_admin.ListSchemaBundlesResponse: + r"""Call the list schema bundles method over HTTP. + + Args: + request (~.bigtable_table_admin.ListSchemaBundlesRequest): + The request object. The request for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.bigtable_table_admin.ListSchemaBundlesResponse: + The response for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_schema_bundles( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListSchemaBundles", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListSchemaBundles", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._ListSchemaBundles._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
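The `_Base*._get_transcoded_request` helpers wrap the same `path_template.transcode` step the removed inline code performed: an http rule binds request fields into the URI, and unbound fields fall through to the query params. A standalone sketch using keyword arguments (values illustrative):

from google.api_core import path_template

http_options = [
    {"method": "get", "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots"},
]
transcoded = path_template.transcode(
    http_options, parent="projects/p/instances/i/clusters/c", page_size=10
)
# transcoded["uri"]    -> "/v2/projects/p/instances/i/clusters/c/snapshots"
# transcoded["method"] -> "get"
# fields not bound into the URI (page_size) land in transcoded["query_params"]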
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListSchemaBundlesResponse() + pb_resp = bigtable_table_admin.ListSchemaBundlesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_schema_bundles(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_schema_bundles_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.ListSchemaBundlesResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_schema_bundles", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListSchemaBundles", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListSnapshots( + _BaseBigtableTableAdminRestTransport._BaseListSnapshots, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.ListSnapshots") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2432,7 +5404,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListSnapshotsResponse: r"""Call the list snapshots method over HTTP. @@ -2450,8 +5422,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
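The removed inline code (and the `_get_query_params_json` helpers replacing it) derive query params by round-tripping the proto through JSON, then pin the response encoding with the `$alt` param so enums come back as integers, matching `use_integers_for_enums=True` on the request side. A sketch of that round-trip:

import json
from google.protobuf import descriptor_pb2, json_format

# Hedged sketch of the query-param derivation (message type illustrative).
msg = descriptor_pb2.FileDescriptorProto(name="a.proto")
query_params = json.loads(json_format.MessageToJson(msg, use_integers_for_enums=True))
query_params["$alt"] = "json;enum-encoding=int"  # ask the server for int-encoded enums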
Returns: ~.bigtable_table_admin.ListSnapshotsResponse: @@ -2467,39 +5441,55 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots", - }, - ] - request, metadata = self._interceptor.pre_list_snapshots(request, metadata) - pb_request = bigtable_table_admin.ListSnapshotsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_list_snapshots(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListSnapshots", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListSnapshots", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._ListSnapshots._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2512,22 +5502,64 @@ def __call__( pb_resp = bigtable_table_admin.ListSnapshotsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_snapshots(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_snapshots_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.ListSnapshotsResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_snapshots", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListSnapshots", + 
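
Editor's note: all of the new `_LOGGER.debug` blocks are gated on both `CLIENT_LOGGING_SUPPORTED` and the effective log level, so they cost nothing unless DEBUG logging is actually enabled. A sketch of switching them on with the standard library follows; the logger naming is an assumption (the transport logs under its module `__name__`, which lives in the `google` namespace):

```python
import logging

# Route records somewhere visible and enable DEBUG for the google namespace;
# the httpRequest/httpResponse dicts above arrive via the `extra` kwarg.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))
google_logger = logging.getLogger("google")
google_logger.addHandler(handler)
google_logger.setLevel(logging.DEBUG)
```
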
"metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ListTables(BigtableTableAdminRestStub): + class _ListTables( + _BaseBigtableTableAdminRestTransport._BaseListTables, BigtableTableAdminRestStub + ): def __hash__(self): - return hash("ListTables") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.ListTables") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2535,7 +5567,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListTablesResponse: r"""Call the list tables method over HTTP. @@ -2546,8 +5578,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListTablesResponse: @@ -2556,39 +5590,55 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/tables", - }, - ] - request, metadata = self._interceptor.pre_list_tables(request, metadata) - pb_request = bigtable_table_admin.ListTablesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListTables._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_list_tables(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListTables._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseListTables._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListTables", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListTables", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._ListTables._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2601,22 +5651,66 @@ def __call__( pb_resp = bigtable_table_admin.ListTablesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_tables(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_tables_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable_table_admin.ListTablesResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_tables", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListTables", + "metadata": http_response["headers"], + "httpResponse": 
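
Editor's note: the `status_code >= 400` branch shared by every handler converts HTTP errors into the typed `google.api_core` hierarchy via `core_exceptions.from_http_response`. From the caller's side that looks like the sketch below (resource names are placeholders):

```python
from google.api_core import exceptions as core_exceptions
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

client = BigtableTableAdminClient(transport="rest")
try:
    page = client.list_tables(request={"parent": "projects/p/instances/i"})
except core_exceptions.NotFound:          # HTTP 404
    page = None
except core_exceptions.PermissionDenied:  # HTTP 403
    raise
```
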
http_response, + }, + ) return resp - class _ModifyColumnFamilies(BigtableTableAdminRestStub): + class _ModifyColumnFamilies( + _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("ModifyColumnFamilies") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.ModifyColumnFamilies") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2624,7 +5718,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Call the modify column families method over HTTP. @@ -2635,8 +5729,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
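
Editor's note: ModifyColumnFamilies is one of the write-style handlers, so its `_get_response` also posts the serialized request body. A hedged usage sketch — type and field names follow the `bigtable_admin_v2` protos, resource names are placeholders:

```python
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient, types

client = BigtableTableAdminClient(transport="rest")
request = types.ModifyColumnFamiliesRequest(
    name="projects/p/instances/i/tables/t",
    modifications=[
        types.ModifyColumnFamiliesRequest.Modification(
            id="cf1",
            create=types.ColumnFamily(),  # new family, default GC policy
        ),
    ],
)
table = client.modify_column_families(request=request)
```
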
Returns: ~.table.Table: @@ -2647,50 +5743,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_http_options() + ) + request, metadata = self._interceptor.pre_modify_column_families( request, metadata ) - pb_request = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ModifyColumnFamilies", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ModifyColumnFamilies", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._ModifyColumnFamilies._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2703,22 +5813,64 @@ def __call__( pb_resp = table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_modify_column_families(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_modify_column_families_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": 
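
Editor's note: the `_get_request_body_json` helpers referenced here replace the inline `json_format.MessageToJson(...)` calls the old code repeated per method (one plausible motivation being protobuf's removal of the old `including_default_value_fields` flag). A sketch of the serialization they perform, assuming a `transcoded_request` dict as produced by `path_template.transcode`:

```python
from google.protobuf import json_format

def request_body_json(transcoded_request: dict) -> str:
    # transcoded_request["body"] is the sub-message selected by the http
    # rule's `body:` field; enums are encoded as ints to match the
    # "$alt=json;enum-encoding=int" query param set elsewhere.
    return json_format.MessageToJson(
        transcoded_request["body"],
        use_integers_for_enums=True,
    )
```
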
response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.modify_column_families", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ModifyColumnFamilies", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _RestoreTable(BigtableTableAdminRestStub): + class _RestoreTable( + _BaseBigtableTableAdminRestTransport._BaseRestoreTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("RestoreTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.RestoreTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2726,7 +5878,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the restore table method over HTTP. @@ -2737,8 +5889,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2748,48 +5902,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables:restore", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_restore_table(request, metadata) - pb_request = bigtable_table_admin.RestoreTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_restore_table(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.RestoreTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "RestoreTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._RestoreTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2800,22 +5966,64 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_restore_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_restore_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": 
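
Editor's note: RestoreTable parses the raw body into a bare `operations_pb2.Operation`; it is the client layer above this transport that wraps it in an `api_core` operation future. A hedged end-to-end sketch (resource names are placeholders):

```python
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

client = BigtableTableAdminClient(transport="rest")
operation = client.restore_table(
    request={
        "parent": "projects/p/instances/i",
        "table_id": "restored-table",
        "backup": "projects/p/instances/i/clusters/c/backups/b",
    }
)
table = operation.result(timeout=600)  # polls until the restore completes
```
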
response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.restore_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "RestoreTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _SetIamPolicy(BigtableTableAdminRestStub): + class _SetIamPolicy( + _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("SetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.SetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2823,7 +6031,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the set iam policy method over HTTP. @@ -2833,8 +6041,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
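
Editor's note: SetIamPolicy takes the upstream `iam_policy_pb2`/`policy_pb2` protos rather than package-local types, which is why the handler assigns `pb_resp = resp` directly instead of unwrapping a proto-plus message. A usage sketch (role and member are illustrative):

```python
from google.iam.v1 import iam_policy_pb2, policy_pb2
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

client = BigtableTableAdminClient(transport="rest")
policy = policy_pb2.Policy(
    bindings=[
        policy_pb2.Binding(
            role="roles/bigtable.reader",
            members=["user:alice@example.com"],
        )
    ]
)
updated = client.set_iam_policy(
    request=iam_policy_pb2.SetIamPolicyRequest(
        resource="projects/p/instances/i/tables/t",
        policy=policy,
    )
)
```
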
Returns: ~.policy_pb2.Policy: @@ -2916,53 +6126,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.SetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2975,22 +6192,64 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_set_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + 
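
Editor's note: the `post_*_with_metadata` calls are new alongside the plain `post_*` hooks — they also receive the HTTP response headers, stringified into `(key, value)` pairs, and must return both. A sketch of overriding one (base-class name assumed, as in the earlier interceptor sketch):

```python
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestInterceptor,  # import path assumed, as above
)

class HeaderAuditInterceptor(BigtableTableAdminRestInterceptor):
    def post_set_iam_policy_with_metadata(self, response, metadata):
        # `metadata` here is the response headers as (str, str) tuples.
        request_id = dict(metadata).get("x-goog-request-id")
        if request_id:
            print("server request id:", request_id)
        return response, metadata
```
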
"headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.set_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _SnapshotTable(BigtableTableAdminRestStub): + class _SnapshotTable( + _BaseBigtableTableAdminRestTransport._BaseSnapshotTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("SnapshotTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.SnapshotTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2998,7 +6257,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the snapshot table method over HTTP. @@ -3016,8 +6275,471 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_http_options() + ) + + request, metadata = self._interceptor.pre_snapshot_table(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.SnapshotTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SnapshotTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._SnapshotTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_snapshot_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_snapshot_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.snapshot_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SnapshotTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _TestIamPermissions( + _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.TestIamPermissions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + 
retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.iam_policy_pb2.TestIamPermissionsRequest): + The request object. Request message for ``TestIamPermissions`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_http_options() + ) + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.TestIamPermissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableTableAdminRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
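
Editor's note: TestIamPermissions mirrors SetIamPolicy's use of the shared IAM protos. A hedged sketch — the permission strings are illustrative:

```python
from google.iam.v1 import iam_policy_pb2
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

client = BigtableTableAdminClient(transport="rest")
response = client.test_iam_permissions(
    request=iam_policy_pb2.TestIamPermissionsRequest(
        resource="projects/p/instances/i/tables/t",
        permissions=["bigtable.tables.get", "bigtable.tables.readRows"],
    )
)
print("granted:", list(response.permissions))
```
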
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = iam_policy_pb2.TestIamPermissionsResponse() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_test_iam_permissions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_test_iam_permissions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.test_iam_permissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "TestIamPermissions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UndeleteTable( + _BaseBigtableTableAdminRestTransport._BaseUndeleteTable, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.UndeleteTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: bigtable_table_admin.UndeleteTableRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the undelete table method over HTTP. + + Args: + request (~.bigtable_table_admin.UndeleteTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_http_options() + ) + + request, metadata = self._interceptor.pre_undelete_table(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UndeleteTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UndeleteTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._UndeleteTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_undelete_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_undelete_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.undelete_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UndeleteTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UpdateAuthorizedView( + _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.UpdateAuthorizedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: 
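
Editor's note: UndeleteTable is another LRO-returning write — the handler posts a body and parses an `operations_pb2.Operation`. A usage sketch, assuming the table was deleted recently enough to still be recoverable:

```python
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

client = BigtableTableAdminClient(transport="rest")
operation = client.undelete_table(
    request={"name": "projects/p/instances/i/tables/t"}
)
table = operation.result(timeout=300)
```
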
bigtable_table_admin.UpdateAuthorizedViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update authorized view method over HTTP. + + Args: + request (~.bigtable_table_admin.UpdateAuthorizedViewRequest): + The request object. The request for + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -3027,48 +6749,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:snapshot", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_snapshot_table(request, metadata) - pb_request = bigtable_table_admin.SnapshotTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_update_authorized_view( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - 
params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._UpdateAuthorizedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3079,96 +6817,146 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_snapshot_table(resp) + + resp = self._interceptor.post_update_authorized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_authorized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _TestIamPermissions(BigtableTableAdminRestStub): + class _UpdateBackup( + _BaseBigtableTableAdminRestTransport._BaseUpdateBackup, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("TestIamPermissions") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.UpdateBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, - request: iam_policy_pb2.TestIamPermissionsRequest, + request: bigtable_table_admin.UpdateBackupRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.Backup: + r"""Call the update backup method over HTTP. Args: - request (~.iam_policy_pb2.TestIamPermissionsRequest): - The request object. Request message for ``TestIamPermissions`` method. + request (~.bigtable_table_admin.UpdateBackupRequest): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - ~.iam_policy_pb2.TestIamPermissionsResponse: - Response message for ``TestIamPermissions`` method. + ~.table.Backup: + A backup of a Cloud Bigtable table. """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_test_iam_permissions( - request, metadata + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_http_options() ) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - # Jsonify the request body + request, metadata = self._interceptor.pre_update_backup(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._UpdateBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3177,46 +6965,90 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = iam_policy_pb2.TestIamPermissionsResponse() - pb_resp = resp + resp = table.Backup() 
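
Editor's note: UpdateBackup is a PATCH-style RPC driven by a field mask; the old transport injected a default `updateMask` query parameter through `__REQUIRED_FIELDS_DEFAULT_VALUES`, and that responsibility now lives in the shared `_BaseUpdateBackup` helpers. A hedged sketch (proto-plus is assumed to coerce the datetime into a Timestamp):

```python
import datetime

from google.protobuf import field_mask_pb2
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient, types

client = BigtableTableAdminClient(transport="rest")
request = types.UpdateBackupRequest(
    backup=types.Backup(
        name="projects/p/instances/i/clusters/c/backups/b",
        expire_time=datetime.datetime(2030, 1, 1, tzinfo=datetime.timezone.utc),
    ),
    # Only fields named in the mask are written.
    update_mask=field_mask_pb2.FieldMask(paths=["expire_time"]),
)
backup = client.update_backup(request=request)
```
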
+ pb_resp = table.Backup.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_test_iam_permissions(resp) + + resp = self._interceptor.post_update_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Backup.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _UndeleteTable(BigtableTableAdminRestStub): + class _UpdateSchemaBundle( + _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("UndeleteTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.UpdateSchemaBundle") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, - request: bigtable_table_admin.UndeleteTableRequest, + request: bigtable_table_admin.UpdateSchemaBundleRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the undelete table method over HTTP. + r"""Call the update schema bundle method over HTTP. Args: - request (~.bigtable_table_admin.UndeleteTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + request (~.bigtable_table_admin.UpdateSchemaBundleRequest): + The request object. The request for + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -3226,48 +7058,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:undelete", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_undelete_table(request, metadata) - pb_request = bigtable_table_admin.UndeleteTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_update_schema_bundle( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateSchemaBundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateSchemaBundle", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._UpdateSchemaBundle._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3278,122 +7126,64 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_undelete_table(resp) + + resp = self._interceptor.post_update_schema_bundle(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_schema_bundle_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + 
except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_schema_bundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateSchemaBundle", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _UpdateBackup(BigtableTableAdminRestStub): + class _UpdateTable( + _BaseBigtableTableAdminRestTransport._BaseUpdateTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("UpdateBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - - def __call__( - self, - request: bigtable_table_admin.UpdateBackupRequest, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Call the update backup method over HTTP. - - Args: - request (~.bigtable_table_admin.UpdateBackupRequest): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.table.Backup: - A backup of a Cloud Bigtable table. - """ - - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}", - "body": "backup", - }, - ] - request, metadata = self._interceptor.pre_update_backup(request, metadata) - pb_request = bigtable_table_admin.UpdateBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + return hash("BigtableTableAdminRestTransport.UpdateTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): uri = transcoded_request["uri"] method = transcoded_request["method"] - - # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) - ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), data=body, ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
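(Editor's note on the error path referenced in the comment above: `core_exceptions.from_http_response` converts a failed `requests.Response` into the matching `GoogleAPICallError` subclass, so REST callers can catch the same exception hierarchy as gRPC callers. A hedged sketch, assuming `transport` is an already-constructed `BigtableTableAdminRestTransport` and `request` a populated `UpdateSchemaBundleRequest`:)

    from google.api_core import exceptions as core_exceptions

    try:
        operation = transport.update_schema_bundle(request)
    except core_exceptions.PermissionDenied:          # HTTP 403
        raise
    except core_exceptions.GoogleAPICallError as e:   # any other 4xx/5xx error
        print(e.code, e.message)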
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Backup() - pb_resp = table.Backup.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_backup(resp) - return resp - - class _UpdateTable(BigtableTableAdminRestStub): - def __hash__(self): - return hash("UpdateTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return response def __call__( self, @@ -3401,7 +7191,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update table method over HTTP. @@ -3412,8 +7202,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -3423,48 +7215,60 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{table.name=projects/*/instances/*/tables/*}", - "body": "table", - }, - ] - request, metadata = self._interceptor.pre_update_table(request, metadata) - pb_request = bigtable_table_admin.UpdateTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_update_table(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + 
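                # (Editorial aside, not generated code: this dict becomes the
                # structured "httpRequest" record passed to _LOGGER.debug via
                # `extra`, alongside "rpcName" and "serviceName". A hedged sketch
                # of consuming those record attributes with a stock logging filter:
                #
                #     class SkipUpdateTable(logging.Filter):
                #         def filter(self, record):
                #             return getattr(record, "rpcName", "") != "UpdateTable"
                # )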
"headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._UpdateTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3475,7 +7279,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property @@ -3497,6 +7327,16 @@ def copy_backup( # In C++ this would require a dynamic_cast return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore + @property + def create_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.CreateAuthorizedViewRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateAuthorizedView(self._session, self._host, self._interceptor) # type: ignore + @property def create_backup( self, @@ -3505,6 +7345,16 @@ def create_backup( # In C++ this would require a dynamic_cast return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore + @property + def create_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.CreateSchemaBundleRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateSchemaBundle(self._session, self._host, self._interceptor) # type: ignore + @property def create_table( self, @@ -3523,6 +7373,14 @@ def create_table_from_snapshot( # In C++ this would require a dynamic_cast return self._CreateTableFromSnapshot(self._session, self._host, self._interceptor) # type: ignore + @property + def delete_authorized_view( + self, + ) -> Callable[[bigtable_table_admin.DeleteAuthorizedViewRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeleteAuthorizedView(self._session, self._host, self._interceptor) # type: ignore + @property def delete_backup( self, @@ -3531,6 +7389,14 @@ def delete_backup( # In C++ this would require a dynamic_cast return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore + @property + def delete_schema_bundle( + self, + ) -> Callable[[bigtable_table_admin.DeleteSchemaBundleRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteSchemaBundle(self._session, self._host, self._interceptor) # type: ignore + @property def delete_snapshot( self, @@ -3566,6 +7432,16 @@ def generate_consistency_token( # In C++ this would require a dynamic_cast return self._GenerateConsistencyToken(self._session, self._host, self._interceptor) # type: ignore + @property + def get_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.GetAuthorizedViewRequest], table.AuthorizedView + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetAuthorizedView(self._session, self._host, self._interceptor) # type: ignore + @property def get_backup( self, @@ -3582,6 +7458,14 @@ def get_iam_policy( # In C++ this would require a dynamic_cast return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + @property + def get_schema_bundle( + self, + ) -> Callable[[bigtable_table_admin.GetSchemaBundleRequest], table.SchemaBundle]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetSchemaBundle(self._session, self._host, self._interceptor) # type: ignore + @property def get_snapshot( self, @@ -3598,6 +7482,17 @@ def get_table( # In C++ this would require a dynamic_cast return self._GetTable(self._session, self._host, self._interceptor) # type: ignore + @property + def list_authorized_views( + self, + ) -> Callable[ + [bigtable_table_admin.ListAuthorizedViewsRequest], + bigtable_table_admin.ListAuthorizedViewsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListAuthorizedViews(self._session, self._host, self._interceptor) # type: ignore + @property def list_backups( self, @@ -3609,6 +7504,17 @@ def list_backups( # In C++ this would require a dynamic_cast return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore + @property + def list_schema_bundles( + self, + ) -> Callable[ + [bigtable_table_admin.ListSchemaBundlesRequest], + bigtable_table_admin.ListSchemaBundlesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListSchemaBundles(self._session, self._host, self._interceptor) # type: ignore + @property def list_snapshots( self, @@ -3686,6 +7592,16 @@ def undelete_table( # In C++ this would require a dynamic_cast return self._UndeleteTable(self._session, self._host, self._interceptor) # type: ignore + @property + def update_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateAuthorizedViewRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateAuthorizedView(self._session, self._host, self._interceptor) # type: ignore + @property def update_backup( self, @@ -3694,6 +7610,16 @@ def update_backup( # In C++ this would require a dynamic_cast return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore + @property + def update_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateSchemaBundleRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateSchemaBundle(self._session, self._host, self._interceptor) # type: ignore + @property def update_table( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py new file mode 100644 index 000000000..ef6c2374d --- /dev/null +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py @@ -0,0 +1,2001 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + + +class _BaseBigtableTableAdminRestTransport(BigtableTableAdminTransport): + """Base REST backend transport for BigtableTableAdmin. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + Args: + host (Optional[str]): + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). + credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + class _BaseCheckConsistency: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CheckConsistencyRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCopyBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {
k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateAuthorizedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "authorizedViewId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", + "body": "authorized_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "backupId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", + "body": "backup", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return 
transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateSchemaBundle: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "schemaBundleId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles", + "body": "schema_bundle", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateSchemaBundleRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = 
"json;enum-encoding=int" + return query_params + + class _BaseCreateTableFromSnapshot: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteAuthorizedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteBackupRequest.pb(request) + transcoded_request = 
path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteSchemaBundle: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteSchemaBundleRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteSnapshot: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/tables/*}", + }, + ] + return http_options + + @staticmethod + 
def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDropRowRange: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGenerateConsistencyToken: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = 
"json;enum-encoding=int" + return query_params + + class _BaseGetAuthorizedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:getIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def 
_get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetSchemaBundle: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetSchemaBundleRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetSnapshot: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in 
cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/tables/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListAuthorizedViews: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListAuthorizedViewsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListBackups: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListBackupsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListBackups._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListSchemaBundles: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + 
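        # (Editorial aside, not generated code: every _Base* helper class here
        # follows one recipe. A hedged sketch for this ListSchemaBundles helper:
        #
        #     opts = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_http_options()
        #     transcoded = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_transcoded_request(opts, request)
        #     params = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_query_params_json(transcoded)
        #
        # path_template.transcode() matches the request fields against the "uri"
        # template and returns {"uri", "method", "query_params"} (plus "body"
        # where the binding declares one); GET/DELETE helpers therefore omit
        # _get_request_body_json.)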
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListSchemaBundlesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListSnapshots: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListSnapshotsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListTables: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/tables", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListTablesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListTables._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class 
_BaseModifyColumnFamilies: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseRestoreTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables:restore", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.RestoreTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseSetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": 
"/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:setIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseSnapshotTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:snapshot", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.SnapshotTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseTestIamPermissions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": 
"/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:testIamPermissions", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUndeleteTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:undelete", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UndeleteTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateAuthorizedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}", + "body": "authorized_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UpdateAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def 
_get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}", + "body": "backup", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UpdateBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateSchemaBundle: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{schema_bundle.name=projects/*/instances/*/tables/*/schemaBundles/*}", + "body": "schema_bundle", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UpdateSchemaBundleRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: 
Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{table.name=projects/*/instances/*/tables/*}", + "body": "table", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UpdateTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + +__all__ = ("_BaseBigtableTableAdminRestTransport",) diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py index a2fefffc8..d2036c7a3 100644 --- a/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/google/cloud/bigtable_admin_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
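Every `_Base<Method>` class above follows the same stateless pipeline: `_get_http_options()` declares the HTTP rule, `_get_transcoded_request()` binds the request message into the URI template via `path_template.transcode`, and `_get_request_body_json()` / `_get_query_params_json()` produce the wire payload, with `_get_unset_required_fields()` back-filling required query parameters (such as `updateMask`) that the caller left unset. Below is a minimal sketch, not part of this change, of how a concrete transport method might compose these helpers; the import path and the `session` object (an authorized requests-style session) are assumptions for illustration only.

```python
# Illustrative only: compose the _Base* helpers into a REST call.
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest_base import (
    _BaseBigtableTableAdminRestTransport,  # assumed module path
)
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin


def update_table(session, host, request: bigtable_table_admin.UpdateTableRequest):
    base = _BaseBigtableTableAdminRestTransport._BaseUpdateTable
    # 1. Select the HTTP rule and bind request fields into the URI template.
    transcoded = base._get_transcoded_request(base._get_http_options(), request)
    # 2. Serialize the body, then build query params; unset required fields
    #    (e.g. updateMask for UpdateTable) are filled with default values.
    body = base._get_request_body_json(transcoded)
    params = base._get_query_params_json(transcoded)
    # 3. Issue the call; `transcoded` carries the HTTP method and relative URI.
    return session.request(
        transcoded["method"],
        f"https://{host}{transcoded['uri']}",
        params=params,
        data=body,
    )
```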
@@ -19,12 +19,20 @@ CreateClusterRequest, CreateInstanceMetadata, CreateInstanceRequest, + CreateLogicalViewMetadata, + CreateLogicalViewRequest, + CreateMaterializedViewMetadata, + CreateMaterializedViewRequest, DeleteAppProfileRequest, DeleteClusterRequest, DeleteInstanceRequest, + DeleteLogicalViewRequest, + DeleteMaterializedViewRequest, GetAppProfileRequest, GetClusterRequest, GetInstanceRequest, + GetLogicalViewRequest, + GetMaterializedViewRequest, ListAppProfilesRequest, ListAppProfilesResponse, ListClustersRequest, @@ -33,6 +41,10 @@ ListHotTabletsResponse, ListInstancesRequest, ListInstancesResponse, + ListLogicalViewsRequest, + ListLogicalViewsResponse, + ListMaterializedViewsRequest, + ListMaterializedViewsResponse, PartialUpdateClusterMetadata, PartialUpdateClusterRequest, PartialUpdateInstanceRequest, @@ -40,28 +52,45 @@ UpdateAppProfileRequest, UpdateClusterMetadata, UpdateInstanceMetadata, + UpdateLogicalViewMetadata, + UpdateLogicalViewRequest, + UpdateMaterializedViewMetadata, + UpdateMaterializedViewRequest, ) from .bigtable_table_admin import ( CheckConsistencyRequest, CheckConsistencyResponse, CopyBackupMetadata, CopyBackupRequest, + CreateAuthorizedViewMetadata, + CreateAuthorizedViewRequest, CreateBackupMetadata, CreateBackupRequest, + CreateSchemaBundleMetadata, + CreateSchemaBundleRequest, CreateTableFromSnapshotMetadata, CreateTableFromSnapshotRequest, CreateTableRequest, + DataBoostReadLocalWrites, + DeleteAuthorizedViewRequest, DeleteBackupRequest, + DeleteSchemaBundleRequest, DeleteSnapshotRequest, DeleteTableRequest, DropRowRangeRequest, GenerateConsistencyTokenRequest, GenerateConsistencyTokenResponse, + GetAuthorizedViewRequest, GetBackupRequest, + GetSchemaBundleRequest, GetSnapshotRequest, GetTableRequest, + ListAuthorizedViewsRequest, + ListAuthorizedViewsResponse, ListBackupsRequest, ListBackupsResponse, + ListSchemaBundlesRequest, + ListSchemaBundlesResponse, ListSnapshotsRequest, ListSnapshotsResponse, ListTablesRequest, @@ -72,9 +101,14 @@ RestoreTableRequest, SnapshotTableMetadata, SnapshotTableRequest, + StandardReadRemoteWrites, UndeleteTableMetadata, UndeleteTableRequest, + UpdateAuthorizedViewMetadata, + UpdateAuthorizedViewRequest, UpdateBackupRequest, + UpdateSchemaBundleMetadata, + UpdateSchemaBundleRequest, UpdateTableMetadata, UpdateTableRequest, ) @@ -89,19 +123,29 @@ Cluster, HotTablet, Instance, + LogicalView, + MaterializedView, ) from .table import ( + AuthorizedView, Backup, BackupInfo, ChangeStreamConfig, ColumnFamily, EncryptionInfo, GcRule, + ProtoSchema, RestoreInfo, + SchemaBundle, Snapshot, Table, + TieredStorageConfig, + TieredStorageRule, RestoreSourceType, ) +from .types import ( + Type, +) __all__ = ( "CreateAppProfileRequest", @@ -109,12 +153,20 @@ "CreateClusterRequest", "CreateInstanceMetadata", "CreateInstanceRequest", + "CreateLogicalViewMetadata", + "CreateLogicalViewRequest", + "CreateMaterializedViewMetadata", + "CreateMaterializedViewRequest", "DeleteAppProfileRequest", "DeleteClusterRequest", "DeleteInstanceRequest", + "DeleteLogicalViewRequest", + "DeleteMaterializedViewRequest", "GetAppProfileRequest", "GetClusterRequest", "GetInstanceRequest", + "GetLogicalViewRequest", + "GetMaterializedViewRequest", "ListAppProfilesRequest", "ListAppProfilesResponse", "ListClustersRequest", @@ -123,6 +175,10 @@ "ListHotTabletsResponse", "ListInstancesRequest", "ListInstancesResponse", + "ListLogicalViewsRequest", + "ListLogicalViewsResponse", + "ListMaterializedViewsRequest", + "ListMaterializedViewsResponse", 
"PartialUpdateClusterMetadata", "PartialUpdateClusterRequest", "PartialUpdateInstanceRequest", @@ -130,26 +186,43 @@ "UpdateAppProfileRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", + "UpdateLogicalViewMetadata", + "UpdateLogicalViewRequest", + "UpdateMaterializedViewMetadata", + "UpdateMaterializedViewRequest", "CheckConsistencyRequest", "CheckConsistencyResponse", "CopyBackupMetadata", "CopyBackupRequest", + "CreateAuthorizedViewMetadata", + "CreateAuthorizedViewRequest", "CreateBackupMetadata", "CreateBackupRequest", + "CreateSchemaBundleMetadata", + "CreateSchemaBundleRequest", "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", + "DataBoostReadLocalWrites", + "DeleteAuthorizedViewRequest", "DeleteBackupRequest", + "DeleteSchemaBundleRequest", "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", + "GetAuthorizedViewRequest", "GetBackupRequest", + "GetSchemaBundleRequest", "GetSnapshotRequest", "GetTableRequest", + "ListAuthorizedViewsRequest", + "ListAuthorizedViewsResponse", "ListBackupsRequest", "ListBackupsResponse", + "ListSchemaBundlesRequest", + "ListSchemaBundlesResponse", "ListSnapshotsRequest", "ListSnapshotsResponse", "ListTablesRequest", @@ -160,9 +233,14 @@ "RestoreTableRequest", "SnapshotTableMetadata", "SnapshotTableRequest", + "StandardReadRemoteWrites", "UndeleteTableMetadata", "UndeleteTableRequest", + "UpdateAuthorizedViewMetadata", + "UpdateAuthorizedViewRequest", "UpdateBackupRequest", + "UpdateSchemaBundleMetadata", + "UpdateSchemaBundleRequest", "UpdateTableMetadata", "UpdateTableRequest", "OperationProgress", @@ -173,14 +251,22 @@ "Cluster", "HotTablet", "Instance", + "LogicalView", + "MaterializedView", + "AuthorizedView", "Backup", "BackupInfo", "ChangeStreamConfig", "ColumnFamily", "EncryptionInfo", "GcRule", + "ProtoSchema", "RestoreInfo", + "SchemaBundle", "Snapshot", "Table", + "TieredStorageConfig", + "TieredStorageRule", "RestoreSourceType", + "Type", ) diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 87332a351..4197ed0b7 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -53,6 +53,22 @@ "UpdateAppProfileMetadata", "ListHotTabletsRequest", "ListHotTabletsResponse", + "CreateLogicalViewRequest", + "CreateLogicalViewMetadata", + "GetLogicalViewRequest", + "ListLogicalViewsRequest", + "ListLogicalViewsResponse", + "UpdateLogicalViewRequest", + "UpdateLogicalViewMetadata", + "DeleteLogicalViewRequest", + "CreateMaterializedViewRequest", + "CreateMaterializedViewMetadata", + "GetMaterializedViewRequest", + "ListMaterializedViewsRequest", + "ListMaterializedViewsResponse", + "UpdateMaterializedViewRequest", + "UpdateMaterializedViewMetadata", + "DeleteMaterializedViewRequest", }, ) @@ -77,8 +93,7 @@ class CreateInstanceRequest(proto.Message): mapped by desired cluster ID, e.g., just ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. Currently, - at most four clusters can be specified. 
+ Fields marked ``OutputOnly`` must be left blank. """ parent: str = proto.Field( @@ -888,4 +903,462 @@ def raw_page(self): ) +class CreateLogicalViewRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateLogicalView. + + Attributes: + parent (str): + Required. The parent instance where this logical view will + be created. Format: + ``projects/{project}/instances/{instance}``. + logical_view_id (str): + Required. The ID to use for the logical view, + which will become the final component of the + logical view's resource name. + logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): + Required. The logical view to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + logical_view_id: str = proto.Field( + proto.STRING, + number=2, + ) + logical_view: gba_instance.LogicalView = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.LogicalView, + ) + + +class CreateLogicalViewMetadata(proto.Message): + r"""The metadata for the Operation returned by CreateLogicalView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest): + The request that prompted the initiation of + this CreateLogicalView operation. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + original_request: "CreateLogicalViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="CreateLogicalViewRequest", + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class GetLogicalViewRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetLogicalView. + + Attributes: + name (str): + Required. The unique name of the requested logical view. + Values are of the form + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListLogicalViewsRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListLogicalViews. + + Attributes: + parent (str): + Required. The unique name of the instance for which the list + of logical views is requested. Values are of the form + ``projects/{project}/instances/{instance}``. + page_size (int): + Optional. The maximum number of logical views + to return. The service may return fewer than + this value + page_token (str): + Optional. A page token, received from a previous + ``ListLogicalViews`` call. Provide this to retrieve the + subsequent page. + + When paginating, all other parameters provided to + ``ListLogicalViews`` must match the call that provided the + page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListLogicalViewsResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListLogicalViews. + + Attributes: + logical_views (MutableSequence[google.cloud.bigtable_admin_v2.types.LogicalView]): + The list of requested logical views. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. 
+ """ + + @property + def raw_page(self): + return self + + logical_views: MutableSequence[gba_instance.LogicalView] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_instance.LogicalView, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateLogicalViewRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.UpdateLogicalView. + + Attributes: + logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): + Required. The logical view to update. + + The logical view's ``name`` field is used to identify the + view to update. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. + """ + + logical_view: gba_instance.LogicalView = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.LogicalView, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateLogicalViewMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateLogicalView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest): + The request that prompted the initiation of + this UpdateLogicalView operation. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + original_request: "UpdateLogicalViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="UpdateLogicalViewRequest", + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteLogicalViewRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteLogicalView. + + Attributes: + name (str): + Required. The unique name of the logical view to be deleted. + Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + etag (str): + Optional. The current etag of the logical + view. If an etag is provided and does not match + the current etag of the logical view, deletion + will be blocked and an ABORTED error will be + returned. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateMaterializedViewRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.CreateMaterializedView. + + Attributes: + parent (str): + Required. The parent instance where this materialized view + will be created. Format: + ``projects/{project}/instances/{instance}``. + materialized_view_id (str): + Required. The ID to use for the materialized + view, which will become the final component of + the materialized view's resource name. + materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): + Required. The materialized view to create. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + materialized_view_id: str = proto.Field( + proto.STRING, + number=2, + ) + materialized_view: gba_instance.MaterializedView = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.MaterializedView, + ) + + +class CreateMaterializedViewMetadata(proto.Message): + r"""The metadata for the Operation returned by + CreateMaterializedView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest): + The request that prompted the initiation of + this CreateMaterializedView operation. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + original_request: "CreateMaterializedViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="CreateMaterializedViewRequest", + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class GetMaterializedViewRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.GetMaterializedView. + + Attributes: + name (str): + Required. The unique name of the requested materialized + view. Values are of the form + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListMaterializedViewsRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.ListMaterializedViews. + + Attributes: + parent (str): + Required. The unique name of the instance for which the list + of materialized views is requested. Values are of the form + ``projects/{project}/instances/{instance}``. + page_size (int): + Optional. The maximum number of materialized + views to return. The service may return fewer + than this value + page_token (str): + Optional. A page token, received from a previous + ``ListMaterializedViews`` call. Provide this to retrieve the + subsequent page. + + When paginating, all other parameters provided to + ``ListMaterializedViews`` must match the call that provided + the page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListMaterializedViewsResponse(proto.Message): + r"""Response message for + BigtableInstanceAdmin.ListMaterializedViews. + + Attributes: + materialized_views (MutableSequence[google.cloud.bigtable_admin_v2.types.MaterializedView]): + The list of requested materialized views. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. + """ + + @property + def raw_page(self): + return self + + materialized_views: MutableSequence[ + gba_instance.MaterializedView + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_instance.MaterializedView, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateMaterializedViewRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.UpdateMaterializedView. + + Attributes: + materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): + Required. 
The materialized view to update. + + The materialized view's ``name`` field is used to identify + the view to update. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. + """ + + materialized_view: gba_instance.MaterializedView = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.MaterializedView, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateMaterializedViewMetadata(proto.Message): + r"""The metadata for the Operation returned by + UpdateMaterializedView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest): + The request that prompted the initiation of + this UpdateMaterializedView operation. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + original_request: "UpdateMaterializedViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="UpdateMaterializedViewRequest", + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteMaterializedViewRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.DeleteMaterializedView. + + Attributes: + name (str): + Required. The unique name of the materialized view to be + deleted. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + etag (str): + Optional. The current etag of the + materialized view. If an etag is provided and + does not match the current etag of the + materialized view, deletion will be blocked and + an ABORTED error will be returned. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 6a3b31a1e..69de07a2a 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
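Taken together, the messages above define complete create/get/list/update/delete surfaces for logical and materialized views, each long-running mutation carrying an `original_request` plus start/end timestamps in its metadata. The following is a hedged sketch of the logical view lifecycle, assuming the matching `BigtableInstanceAdminClient` methods added elsewhere in this change; the resource names, the view's `query`, and the `etag` handling are illustrative, not taken from this diff.

```python
# Illustrative only: lifecycle of a logical view using the messages above.
from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient, types

client = BigtableInstanceAdminClient()
parent = "projects/my-project/instances/my-instance"  # placeholder

# create_logical_view returns a long-running operation; its metadata is the
# CreateLogicalViewMetadata message (original_request, start/end times).
operation = client.create_logical_view(
    types.CreateLogicalViewRequest(
        parent=parent,
        logical_view_id="orders-view",
        logical_view=types.LogicalView(query="SELECT _key FROM `orders`"),
    )
)
view = operation.result()

# List results are paginated via page_token / next_page_token.
for lv in client.list_logical_views(types.ListLogicalViewsRequest(parent=parent)):
    print(lv.name)

# Deletion can be guarded by etag; a stale etag yields an ABORTED error.
client.delete_logical_view(
    types.DeleteLogicalViewRequest(name=view.name, etag=view.etag)
)
```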
@@ -47,6 +47,8 @@ "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", "CheckConsistencyRequest", + "StandardReadRemoteWrites", + "DataBoostReadLocalWrites", "CheckConsistencyResponse", "SnapshotTableRequest", "GetSnapshotRequest", @@ -64,6 +66,22 @@ "ListBackupsResponse", "CopyBackupRequest", "CopyBackupMetadata", + "CreateAuthorizedViewRequest", + "CreateAuthorizedViewMetadata", + "ListAuthorizedViewsRequest", + "ListAuthorizedViewsResponse", + "GetAuthorizedViewRequest", + "UpdateAuthorizedViewRequest", + "UpdateAuthorizedViewMetadata", + "DeleteAuthorizedViewRequest", + "CreateSchemaBundleRequest", + "CreateSchemaBundleMetadata", + "UpdateSchemaBundleRequest", + "UpdateSchemaBundleMetadata", + "GetSchemaBundleRequest", + "ListSchemaBundlesRequest", + "ListSchemaBundlesResponse", + "DeleteSchemaBundleRequest", }, ) @@ -217,20 +235,20 @@ class CreateTableRequest(proto.Message): Example: - - Row keys := - ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial_split_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 - ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` + - Row keys := + ``["a", "apple", "custom", "customer_1", "customer_2",`` + ``"other", "zz"]`` + - initial_split_keys := + ``["apple", "customer_1", "customer_2", "other"]`` + - Key assignment: + + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 + ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 + ``[other, ) => {"other", "zz"}.`` """ class Split(proto.Message): @@ -464,15 +482,19 @@ class UpdateTableRequest(proto.Message): which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request message. The - wildcard (*) path is currently not supported. Currently + wildcard (\*) path is currently not supported. Currently UpdateTable is only supported for the following fields: - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. + ignore_warnings (bool): + Optional. If true, ignore safety checks when + updating the table. """ table: gba_table.Table = proto.Field( @@ -485,6 +507,10 @@ class UpdateTableRequest(proto.Message): number=2, message=field_mask_pb2.FieldMask, ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) class UpdateTableMetadata(proto.Message): @@ -597,6 +623,9 @@ class ModifyColumnFamiliesRequest(proto.Message): earlier modifications can be masked by later ones (in the case of repeated updates to the same family, for example). + ignore_warnings (bool): + Optional. If true, ignore safety checks when + modifying the column families. """ class Modification(proto.Message): @@ -629,6 +658,11 @@ class Modification(proto.Message): given ID, or fail if no such family exists. This field is a member of `oneof`_ ``mod``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A mask specifying which fields (e.g. 
``gc_rule``) + in the ``update`` mod should be updated, ignored for other + modification types. If unset or empty, we treat it as + updating ``gc_rule`` to be backward compatible. """ id: str = proto.Field( @@ -652,6 +686,11 @@ class Modification(proto.Message): number=4, oneof="mod", ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) name: str = proto.Field( proto.STRING, @@ -662,6 +701,10 @@ class Modification(proto.Message): number=2, message=Modification, ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) class GenerateConsistencyTokenRequest(proto.Message): @@ -700,6 +743,13 @@ class CheckConsistencyRequest(proto.Message): r"""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): Required. The unique name of the Table for which to check @@ -708,6 +758,20 @@ class CheckConsistencyRequest(proto.Message): consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. + standard_read_remote_writes (google.cloud.bigtable_admin_v2.types.StandardReadRemoteWrites): + Checks that reads using an app profile with + ``StandardIsolation`` can see all writes committed before + the token was created, even if the read and write target + different clusters. + + This field is a member of `oneof`_ ``mode``. + data_boost_read_local_writes (google.cloud.bigtable_admin_v2.types.DataBoostReadLocalWrites): + Checks that reads using an app profile with + ``DataBoostIsolationReadOnly`` can see all writes committed + before the token was created, but only if the read and write + target the same cluster. + + This field is a member of `oneof`_ ``mode``. """ name: str = proto.Field( @@ -718,6 +782,32 @@ class CheckConsistencyRequest(proto.Message): proto.STRING, number=2, ) + standard_read_remote_writes: "StandardReadRemoteWrites" = proto.Field( + proto.MESSAGE, + number=3, + oneof="mode", + message="StandardReadRemoteWrites", + ) + data_boost_read_local_writes: "DataBoostReadLocalWrites" = proto.Field( + proto.MESSAGE, + number=4, + oneof="mode", + message="DataBoostReadLocalWrites", + ) + + +class StandardReadRemoteWrites(proto.Message): + r"""Checks that all writes before the consistency token was + generated are replicated in every cluster and readable. + + """ + + +class DataBoostReadLocalWrites(proto.Message): + r"""Checks that all writes before the consistency token was + generated in the same cluster are readable by Databoost. + + """ class CheckConsistencyResponse(proto.Message): @@ -1009,7 +1099,7 @@ class CreateBackupRequest(proto.Message): name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. This string must be between 1 and 50 characters in length - and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*. backup (google.cloud.bigtable_admin_v2.types.Backup): Required. The backup to create. """ @@ -1077,7 +1167,7 @@ class UpdateBackupRequest(proto.Message): required. Other fields are ignored. 
Update is only supported for the following fields: - - ``backup.expire_time``. + - ``backup.expire_time``. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A mask specifying which fields (e.g. ``expire_time``) in the Backup resource should be updated. @@ -1156,16 +1246,16 @@ class ListBackupsRequest(proto.Message): The fields eligible for filtering are: - - ``name`` - - ``source_table`` - - ``state`` - - ``start_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``end_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``expire_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``size_bytes`` + - ``name`` + - ``source_table`` + - ``state`` + - ``start_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``end_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` To filter on multiple expressions, provide each separate expression within parentheses. By default, each expression @@ -1174,20 +1264,20 @@ class ListBackupsRequest(proto.Message): Some examples of using filters are: - - ``name:"exact"`` --> The backup's name is the string - "exact". - - ``name:howl`` --> The backup's name contains the string - "howl". - - ``source_table:prod`` --> The source_table's name - contains the string "prod". - - ``state:CREATING`` --> The backup is pending creation. - - ``state:READY`` --> The backup is fully created and ready - for use. - - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` - --> The backup name contains the string "howl" and - start_time of the backup is before 2018-03-28T14:50:00Z. - - ``size_bytes > 10000000000`` --> The backup's size is - greater than 10GB + - ``name:"exact"`` --> The backup's name is the string + "exact". + - ``name:howl`` --> The backup's name contains the string + "howl". + - ``source_table:prod`` --> The source_table's name contains + the string "prod". + - ``state:CREATING`` --> The backup is pending creation. + - ``state:READY`` --> The backup is fully created and ready + for use. + - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` + --> The backup name contains the string "howl" and + start_time of the backup is before 2018-03-28T14:50:00Z. + - ``size_bytes > 10000000000`` --> The backup's size is + greater than 10GB order_by (str): An expression for specifying the sort order of the results of the request. The string value should specify one or more @@ -1196,13 +1286,13 @@ class ListBackupsRequest(proto.Message): Fields supported are: - - name - - source_table - - expire_time - - start_time - - end_time - - size_bytes - - state + - name + - source_table + - expire_time + - start_time + - end_time + - size_bytes + - state For example, "start_time". The default sorting order is ascending. To specify descending order for the field, a @@ -1282,7 +1372,7 @@ class CopyBackupRequest(proto.Message): Attributes: parent (str): Required. The name of the destination cluster that will - contain the backup copy. The cluster must already exists. + contain the backup copy. The cluster must already exist. Values are of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}``. backup_id (str): @@ -1291,7 +1381,7 @@ class CopyBackupRequest(proto.Message): to create the full backup name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. 
This string must be between 1 and 50 characters in length - and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*. source_backup (str): Required. The source backup to be copied from. The source backup needs to be in READY state for it to be copied. @@ -1361,4 +1451,516 @@ class CopyBackupMetadata(proto.Message): ) +class CreateAuthorizedViewRequest(proto.Message): + r"""The request for + [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] + + Attributes: + parent (str): + Required. This is the name of the table the AuthorizedView + belongs to. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + authorized_view_id (str): + Required. The id of the AuthorizedView to create. This + AuthorizedView must not already exist. The + ``authorized_view_id`` appended to ``parent`` forms the full + AuthorizedView name of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedView/{authorized_view}``. + authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): + Required. The AuthorizedView to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + authorized_view_id: str = proto.Field( + proto.STRING, + number=2, + ) + authorized_view: gba_table.AuthorizedView = proto.Field( + proto.MESSAGE, + number=3, + message=gba_table.AuthorizedView, + ) + + +class CreateAuthorizedViewMetadata(proto.Message): + r"""The metadata for the Operation returned by + CreateAuthorizedView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest): + The request that prompted the initiation of + this CreateAuthorizedView operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request: "CreateAuthorizedViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="CreateAuthorizedViewRequest", + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class ListAuthorizedViewsRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + Attributes: + parent (str): + Required. The unique name of the table for which + AuthorizedViews should be listed. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + page_size (int): + Optional. Maximum number of results per page. + + A page_size of zero lets the server choose the number of + items to return. A page_size which is strictly positive will + return at most that many items. A negative page_size will + cause an error. + + Following the first request, subsequent paginated calls are + not required to pass a page_size. If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + Optional. The value of ``next_page_token`` returned by a + previous call. + view (google.cloud.bigtable_admin_v2.types.AuthorizedView.ResponseView): + Optional. The resource_view to be applied to the returned + AuthorizedViews' fields. Default to NAME_ONLY. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + view: gba_table.AuthorizedView.ResponseView = proto.Field( + proto.ENUM, + number=4, + enum=gba_table.AuthorizedView.ResponseView, + ) + + +class ListAuthorizedViewsResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + Attributes: + authorized_views (MutableSequence[google.cloud.bigtable_admin_v2.types.AuthorizedView]): + The AuthorizedViews present in the requested + table. + next_page_token (str): + Set if not all tables could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + """ + + @property + def raw_page(self): + return self + + authorized_views: MutableSequence[gba_table.AuthorizedView] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_table.AuthorizedView, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetAuthorizedViewRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] + + Attributes: + name (str): + Required. The unique name of the requested AuthorizedView. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + view (google.cloud.bigtable_admin_v2.types.AuthorizedView.ResponseView): + Optional. The resource_view to be applied to the returned + AuthorizedView's fields. Default to BASIC. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + view: gba_table.AuthorizedView.ResponseView = proto.Field( + proto.ENUM, + number=2, + enum=gba_table.AuthorizedView.ResponseView, + ) + + +class UpdateAuthorizedViewRequest(proto.Message): + r"""The request for + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + + Attributes: + authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): + Required. The AuthorizedView to update. The ``name`` in + ``authorized_view`` is used to identify the AuthorizedView. + AuthorizedView name must in this format: + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. A mask specifying + which fields in the AuthorizedView resource should be + updated. This mask is relative to the AuthorizedView + resource, not to the request message. A field will be + overwritten if it is in the mask. If empty, all fields set + in the request will be overwritten. A special value ``*`` + means to overwrite all fields (including fields not set in + the request). + ignore_warnings (bool): + Optional. If true, ignore the safety checks + when updating the AuthorizedView. 
+ """ + + authorized_view: gba_table.AuthorizedView = proto.Field( + proto.MESSAGE, + number=1, + message=gba_table.AuthorizedView, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +class UpdateAuthorizedViewMetadata(proto.Message): + r"""Metadata for the google.longrunning.Operation returned by + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest): + The request that prompted the initiation of + this UpdateAuthorizedView operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request: "UpdateAuthorizedViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="UpdateAuthorizedViewRequest", + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteAuthorizedViewRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] + + Attributes: + name (str): + Required. The unique name of the AuthorizedView to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + etag (str): + Optional. The current etag of the + AuthorizedView. If an etag is provided and does + not match the current etag of the + AuthorizedView, deletion will be blocked and an + ABORTED error will be returned. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateSchemaBundleRequest(proto.Message): + r"""The request for + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + + Attributes: + parent (str): + Required. The parent resource where this schema bundle will + be created. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + schema_bundle_id (str): + Required. The unique ID to use for the schema + bundle, which will become the final component of + the schema bundle's resource name. + schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): + Required. The schema bundle to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + schema_bundle_id: str = proto.Field( + proto.STRING, + number=2, + ) + schema_bundle: gba_table.SchemaBundle = proto.Field( + proto.MESSAGE, + number=3, + message=gba_table.SchemaBundle, + ) + + +class CreateSchemaBundleMetadata(proto.Message): + r"""The metadata for the Operation returned by + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + + Attributes: + name (str): + The unique name identifying this schema bundle. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. 
+ end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class UpdateSchemaBundleRequest(proto.Message): + r"""The request for + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + + Attributes: + schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): + Required. The schema bundle to update. + + The schema bundle's ``name`` field is used to identify the + schema bundle to update. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. + ignore_warnings (bool): + Optional. If set, ignore the safety checks + when updating the Schema Bundle. The safety + checks are: + + - The new Schema Bundle is backwards compatible + with the existing Schema Bundle. + """ + + schema_bundle: gba_table.SchemaBundle = proto.Field( + proto.MESSAGE, + number=1, + message=gba_table.SchemaBundle, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +class UpdateSchemaBundleMetadata(proto.Message): + r"""The metadata for the Operation returned by + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + + Attributes: + name (str): + The unique name identifying this schema bundle. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class GetSchemaBundleRequest(proto.Message): + r"""The request for + [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. + + Attributes: + name (str): + Required. The unique name of the schema bundle to retrieve. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListSchemaBundlesRequest(proto.Message): + r"""The request for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + + Attributes: + parent (str): + Required. The parent, which owns this collection of schema + bundles. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + page_size (int): + The maximum number of schema bundles to + return. If the value is positive, the server may + return at most this value. If unspecified, the + server will return the maximum allowed page + size. 
+ page_token (str): + A page token, received from a previous ``ListSchemaBundles`` + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + ``ListSchemaBundles`` must match the call that provided the + page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListSchemaBundlesResponse(proto.Message): + r"""The response for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + + Attributes: + schema_bundles (MutableSequence[google.cloud.bigtable_admin_v2.types.SchemaBundle]): + The schema bundles from the specified table. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. + """ + + @property + def raw_page(self): + return self + + schema_bundles: MutableSequence[gba_table.SchemaBundle] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_table.SchemaBundle, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteSchemaBundleRequest(proto.Message): + r"""The request for + [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. + + Attributes: + name (str): + Required. The unique name of the schema bundle to delete. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + etag (str): + Optional. The etag of the schema bundle. + If this is provided, it must match the server's + etag. The server returns an ABORTED error on a + mismatched etag. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/common.py b/google/cloud/bigtable_admin_v2/types/common.py index 959b9deb1..7b05e5ff5 100644 --- a/google/cloud/bigtable_admin_v2/types/common.py +++ b/google/cloud/bigtable_admin_v2/types/common.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py index 6ae9159d0..f07414d56 100644 --- a/google/cloud/bigtable_admin_v2/types/instance.py +++ b/google/cloud/bigtable_admin_v2/types/instance.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -32,6 +32,8 @@ "Cluster", "AppProfile", "HotTablet", + "LogicalView", + "MaterializedView", }, ) @@ -55,7 +57,8 @@ class Instance(proto.Message): any time, but should be kept globally unique to avoid confusion. state (google.cloud.bigtable_admin_v2.types.Instance.State): - (``OutputOnly``) The current state of the instance. + Output only. The current state of the + instance. type_ (google.cloud.bigtable_admin_v2.types.Instance.Type): The type of the instance. Defaults to ``PRODUCTION``. labels (MutableMapping[str, str]): @@ -64,24 +67,42 @@ class Instance(proto.Message): customer's organizational needs and deployment strategies. 
They can be used to filter resources and aggregate metrics. - - Label keys must be between 1 and 63 characters long and - must conform to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and - must conform to the regular expression: - ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given - resource. - - Keys and values must both be under 128 bytes. + - Label keys must be between 1 and 63 characters long and + must conform to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. + - Label values must be between 0 and 63 characters long and + must conform to the regular expression: + ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. + - No more than 64 labels can be associated with a given + resource. + - Keys and values must both be under 128 bytes. create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. A server-assigned timestamp representing when - this Instance was created. For instances created before this + Output only. A commit timestamp representing when this + Instance was created. For instances created before this field was added (August 2021), this value is ``seconds: 0, nanos: 1``. satisfies_pzs (bool): Output only. Reserved for future use. This field is a member of `oneof`_ ``_satisfies_pzs``. + satisfies_pzi (bool): + Output only. Reserved for future use. + + This field is a member of `oneof`_ ``_satisfies_pzi``. + tags (MutableMapping[str, str]): + Optional. Input only. Immutable. Tag + keys/values directly bound to this resource. For + example: + + - "123/environment": "production", + - "123/costCenter": "marketing" + + Tags and Labels (above) are both used to bind + metadata to resources, with different use-cases. + See + https://cloud.google.com/resource-manager/docs/tags/tags-overview + for an in-depth overview on the difference + between tags and labels. """ class State(proto.Enum): @@ -157,6 +178,16 @@ class Type(proto.Enum): number=8, optional=True, ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=11, + optional=True, + ) + tags: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=12, + ) class AutoscalingTargets(proto.Message): @@ -173,7 +204,7 @@ class AutoscalingTargets(proto.Message): The storage utilization that the Autoscaler should be trying to achieve. This number is limited between 2560 (2.5TiB) and 5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and - 16384 (16TiB) for an HDD cluster; otherwise it will return + 16384 (16TiB) for an HDD cluster, otherwise it will return INVALID_ARGUMENT error. If this value is set to 0, it will be treated as if it were set to the default value: 2560 for SSD, 8192 for HDD. @@ -234,9 +265,13 @@ class Cluster(proto.Message): Output only. The current state of the cluster. serve_nodes (int): - The number of nodes allocated to this - cluster. More nodes enable higher throughput and - more consistent performance. + The number of nodes in the cluster. If no + value is set, Cloud Bigtable automatically + allocates nodes based on your data footprint and + optimized for 50% storage utilization. + node_scaling_factor (google.cloud.bigtable_admin_v2.types.Cluster.NodeScalingFactor): + Immutable. The node scaling factor of this + cluster. cluster_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterConfig): Configuration for this cluster. 
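The new `tags` map and `node_scaling_factor` field documented above are ordinary message fields and can be set at construction time. A short illustrative sketch follows; the project, tag IDs, and location are placeholders, and note that `NODE_SCALING_FACTOR_2X` requires node counts in increments of 2, as the enum defined below states.

```python
# Illustrative only: constructing the new Instance and Cluster fields.
from google.cloud.bigtable_admin_v2 import types

# Tags are input-only and bound at creation; labels remain mutable metadata.
instance = types.Instance(
    display_name="prod-instance",
    labels={"environment": "production"},
    tags={"123/environment": "production"},  # placeholder tag key/value IDs
)

cluster = types.Cluster(
    location="projects/my-project/locations/us-central1-b",
    serve_nodes=4,  # must be a multiple of 2 with the 2X scaling factor
    node_scaling_factor=types.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_2X,
)
```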
@@ -284,6 +319,28 @@ class State(proto.Enum): RESIZING = 3 DISABLED = 4 + class NodeScalingFactor(proto.Enum): + r"""Possible node scaling factors of the clusters. Node scaling + delivers better latency and more throughput by removing node + boundaries. + + Values: + NODE_SCALING_FACTOR_UNSPECIFIED (0): + No node scaling specified. Defaults to + NODE_SCALING_FACTOR_1X. + NODE_SCALING_FACTOR_1X (1): + The cluster is running with a scaling factor + of 1. + NODE_SCALING_FACTOR_2X (2): + The cluster is running with a scaling factor of 2. All node + count values must be in increments of 2 with this scaling + factor enabled, otherwise an INVALID_ARGUMENT error will be + returned. + """ + NODE_SCALING_FACTOR_UNSPECIFIED = 0 + NODE_SCALING_FACTOR_1X = 1 + NODE_SCALING_FACTOR_2X = 2 + class ClusterAutoscalingConfig(proto.Message): r"""Autoscaling config for a cluster. @@ -336,9 +393,8 @@ class EncryptionConfig(proto.Message): ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK key. 2) Only regional keys can be used and the region of the CMEK - key must match the region of the cluster. - 3) All clusters within an instance must use the same CMEK - key. Values are of the form + key must match the region of the cluster. Values are of + the form ``projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`` """ @@ -364,6 +420,11 @@ class EncryptionConfig(proto.Message): proto.INT32, number=4, ) + node_scaling_factor: NodeScalingFactor = proto.Field( + proto.ENUM, + number=9, + enum=NodeScalingFactor, + ) cluster_config: ClusterConfig = proto.Field( proto.MESSAGE, number=7, @@ -419,8 +480,48 @@ class AppProfile(proto.Message): Use a single-cluster routing policy. This field is a member of `oneof`_ ``routing_policy``. + priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): + This field has been deprecated in favor of + ``standard_isolation.priority``. If you set this field, + ``standard_isolation.priority`` will be set instead. + + The priority of requests sent using this app profile. + + This field is a member of `oneof`_ ``isolation``. + standard_isolation (google.cloud.bigtable_admin_v2.types.AppProfile.StandardIsolation): + The standard options used for isolating this + app profile's traffic from other use cases. + + This field is a member of `oneof`_ ``isolation``. + data_boost_isolation_read_only (google.cloud.bigtable_admin_v2.types.AppProfile.DataBoostIsolationReadOnly): + Specifies that this app profile is intended + for read-only usage via the Data Boost feature. + + This field is a member of `oneof`_ ``isolation``. """ + class Priority(proto.Enum): + r"""Possible priorities for an app profile. Note that higher + priority writes can sometimes queue behind lower priority writes + to the same tablet, as writes must be strictly sequenced in the + durability log. + + Values: + PRIORITY_UNSPECIFIED (0): + Default value. Mapped to PRIORITY_HIGH (the legacy behavior) + on creation. + PRIORITY_LOW (1): + No description available. + PRIORITY_MEDIUM (2): + No description available. + PRIORITY_HIGH (3): + No description available. + """ + PRIORITY_UNSPECIFIED = 0 + PRIORITY_LOW = 1 + PRIORITY_MEDIUM = 2 + PRIORITY_HIGH = 3 + class MultiClusterRoutingUseAny(proto.Message): r"""Read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is @@ -428,18 +529,47 @@ class MultiClusterRoutingUseAny(proto.Message): in a region are considered equidistant. 
Choosing this option sacrifices read-your-writes consistency to improve availability. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: cluster_ids (MutableSequence[str]): The set of clusters to route to. The order is ignored; clusters will be tried in order of distance. If left empty, all clusters are eligible. + row_affinity (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny.RowAffinity): + Row affinity sticky routing based on the row + key of the request. Requests that span multiple + rows are routed non-deterministically. + + This field is a member of `oneof`_ ``affinity``. """ + class RowAffinity(proto.Message): + r"""If enabled, Bigtable will route the request based on the row + key of the request, rather than randomly. Instead, each row key + will be assigned to a cluster, and will stick to that cluster. + If clusters are added or removed, then this may affect which row + keys stick to which clusters. To avoid this, users can use a + cluster group to specify which clusters are to be used. In this + case, new clusters that are not a part of the cluster group will + not be routed to, and routing will be unaffected by the new + cluster. Moreover, clusters specified in the cluster group + cannot be deleted unless removed from the cluster group. + + """ + cluster_ids: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) + row_affinity: "AppProfile.MultiClusterRoutingUseAny.RowAffinity" = proto.Field( + proto.MESSAGE, + number=3, + oneof="affinity", + message="AppProfile.MultiClusterRoutingUseAny.RowAffinity", + ) class SingleClusterRouting(proto.Message): r"""Unconditionally routes all read/write requests to a specific @@ -466,6 +596,63 @@ class SingleClusterRouting(proto.Message): number=2, ) + class StandardIsolation(proto.Message): + r"""Standard options for isolating this app profile's traffic + from other use cases. + + Attributes: + priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): + The priority of requests sent using this app + profile. + """ + + priority: "AppProfile.Priority" = proto.Field( + proto.ENUM, + number=1, + enum="AppProfile.Priority", + ) + + class DataBoostIsolationReadOnly(proto.Message): + r"""Data Boost is a serverless compute capability that lets you + run high-throughput read jobs and queries on your Bigtable data, + without impacting the performance of the clusters that handle + your application traffic. Data Boost supports read-only use + cases with single-cluster routing. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + compute_billing_owner (google.cloud.bigtable_admin_v2.types.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner): + The Compute Billing Owner for this Data Boost + App Profile. + + This field is a member of `oneof`_ ``_compute_billing_owner``. + """ + + class ComputeBillingOwner(proto.Enum): + r"""Compute Billing Owner specifies how usage should be accounted + when using Data Boost. Compute Billing Owner also configures + which Cloud Project is charged for relevant quota. + + Values: + COMPUTE_BILLING_OWNER_UNSPECIFIED (0): + Unspecified value. + HOST_PAYS (1): + The host Cloud Project containing the + targeted Bigtable Instance / Table pays for + compute. 
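# Illustrative sketch, not part of the generated diff: combining the new
# `isolation` oneof with a routing policy. Per the docstring above, Data Boost
# is read-only and pairs with single-cluster routing; resource names here are
# placeholders.
from google.cloud.bigtable_admin_v2 import types

profile = types.AppProfile(
    name="projects/p/instances/i/appProfiles/analytics",
    single_cluster_routing=types.AppProfile.SingleClusterRouting(
        cluster_id="cluster-a",
    ),
    data_boost_isolation_read_only=types.AppProfile.DataBoostIsolationReadOnly(
        compute_billing_owner=(
            types.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner.HOST_PAYS
        ),
    ),
)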
+ """ + COMPUTE_BILLING_OWNER_UNSPECIFIED = 0 + HOST_PAYS = 1 + + compute_billing_owner: "AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner" = proto.Field( + proto.ENUM, + number=1, + optional=True, + enum="AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner", + ) + name: str = proto.Field( proto.STRING, number=1, @@ -490,6 +677,24 @@ class SingleClusterRouting(proto.Message): oneof="routing_policy", message=SingleClusterRouting, ) + priority: Priority = proto.Field( + proto.ENUM, + number=7, + oneof="isolation", + enum=Priority, + ) + standard_isolation: StandardIsolation = proto.Field( + proto.MESSAGE, + number=11, + oneof="isolation", + message=StandardIsolation, + ) + data_boost_isolation_read_only: DataBoostIsolationReadOnly = proto.Field( + proto.MESSAGE, + number=10, + oneof="isolation", + message=DataBoostIsolationReadOnly, + ) class HotTablet(proto.Message): @@ -557,4 +762,84 @@ class HotTablet(proto.Message): ) +class LogicalView(proto.Message): + r"""A SQL logical view object that can be referenced in SQL + queries. + + Attributes: + name (str): + Identifier. The unique name of the logical view. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}`` + query (str): + Required. The logical view's select query. + etag (str): + Optional. The etag for this logical view. + This may be sent on update requests to ensure + that the client has an up-to-date value before + proceeding. The server returns an ABORTED error + on a mismatched etag. + deletion_protection (bool): + Optional. Set to true to make the LogicalView + protected against deletion. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + query: str = proto.Field( + proto.STRING, + number=2, + ) + etag: str = proto.Field( + proto.STRING, + number=3, + ) + deletion_protection: bool = proto.Field( + proto.BOOL, + number=6, + ) + + +class MaterializedView(proto.Message): + r"""A materialized view object that can be referenced in SQL + queries. + + Attributes: + name (str): + Identifier. The unique name of the materialized view. + Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}`` + query (str): + Required. Immutable. The materialized view's + select query. + etag (str): + Optional. The etag for this materialized + view. This may be sent on update requests to + ensure that the client has an up-to-date value + before proceeding. The server returns an ABORTED + error on a mismatched etag. + deletion_protection (bool): + Set to true to make the MaterializedView + protected against deletion. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + query: str = proto.Field( + proto.STRING, + number=2, + ) + etag: str = proto.Field( + proto.STRING, + number=3, + ) + deletion_protection: bool = proto.Field( + proto.BOOL, + number=6, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index 57bd1b00f..c4f23d5fa 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,6 +19,8 @@ import proto # type: ignore +from google.cloud.bigtable_admin_v2.types import types +from google.cloud.bigtable_admin_v2.utils import oneof_message from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -31,12 +33,17 @@ "RestoreInfo", "ChangeStreamConfig", "Table", + "AuthorizedView", "ColumnFamily", "GcRule", "EncryptionInfo", "Snapshot", "Backup", "BackupInfo", + "TieredStorageConfig", + "TieredStorageRule", + "ProtoSchema", + "SchemaBundle", }, ) @@ -109,6 +116,9 @@ class Table(proto.Message): timestamp. Each table is served using the resources of its parent cluster. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): The unique name of the table. Values are of the form @@ -146,12 +156,82 @@ class Table(proto.Message): i.e. deleting the following resources through Admin APIs are prohibited: - - The table. - - The column families in the table. - - The instance containing the table. + - The table. + - The column families in the table. + - The instance containing the table. Note one can still delete the data stored in the table through Data APIs. + automated_backup_policy (google.cloud.bigtable_admin_v2.types.Table.AutomatedBackupPolicy): + If specified, automated backups are enabled + for this table. Otherwise, automated backups are + disabled. + + This field is a member of `oneof`_ ``automated_backup_config``. + tiered_storage_config (google.cloud.bigtable_admin_v2.types.TieredStorageConfig): + Rules to specify what data is stored in each + storage tier. Different tiers store data + differently, providing different trade-offs + between cost and performance. Different parts of + a table can be stored separately on different + tiers. + If a config is specified, tiered storage is + enabled for this table. Otherwise, tiered + storage is disabled. + Only SSD instances can configure tiered storage. + row_key_schema (google.cloud.bigtable_admin_v2.types.Type.Struct): + The row key schema for this table. The schema is used to + decode the raw row key bytes into a structured format. The + order of field declarations in this schema is important, as + it reflects how the raw row key bytes are structured. + Currently, this only affects how the key is read via a + GoogleSQL query from the ExecuteQuery API. + + For a SQL query, the \_key column is still read as raw + bytes. But queries can reference the key fields by name, + which will be decoded from \_key using provided type and + encoding. Queries that reference key fields will fail if + they encounter an invalid row key. 
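# Illustrative sketch, not part of the generated diff: the delimited row key
# schema used in the docstring example that follows (fields "id", "date",
# "product_code", delimiter "#") could be built from the Type helpers added
# later in this diff (types.py):
from google.cloud.bigtable_admin_v2.types import types

utf8 = types.Type(
    string_type=types.Type.String(
        encoding=types.Type.String.Encoding(
            utf8_bytes=types.Type.String.Encoding.Utf8Bytes()
        )
    )
)
big_endian_int64 = types.Type(
    int64_type=types.Type.Int64(
        encoding=types.Type.Int64.Encoding(
            big_endian_bytes=types.Type.Int64.Encoding.BigEndianBytes()
        )
    )
)
row_key_schema = types.Type.Struct(
    fields=[
        types.Type.Struct.Field(field_name="id", type_=utf8),
        types.Type.Struct.Field(field_name="date", type_=utf8),
        types.Type.Struct.Field(field_name="product_code", type_=big_endian_int64),
    ],
    encoding=types.Type.Struct.Encoding(
        delimited_bytes=types.Type.Struct.Encoding.DelimitedBytes(delimiter=b"#")
    ),
)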
+ + For example, if \_key = + "some_id#2024-04-30#\\x00\\x13\\x00\\xf3" with the following + schema: { fields { field_name: "id" type { string { + encoding: utf8_bytes {} } } } fields { field_name: "date" + type { string { encoding: utf8_bytes {} } } } fields { + field_name: "product_code" type { int64 { encoding: + big_endian_bytes {} } } } encoding { delimited_bytes { + delimiter: "#" } } } + + | The decoded key parts would be: id = "some_id", date = + "2024-04-30", product_code = 1245427 The query "SELECT + \_key, product_code FROM table" will return two columns: + /------------------------------------------------------ + | \| \_key \| product_code \| \| + --------------------------------------\|--------------\| + \| "some_id#2024-04-30#\\x00\\x13\\x00\\xf3" \| 1245427 \| + ------------------------------------------------------/ + + The schema has the following invariants: (1) The decoded + field values are order-preserved. For read, the field values + will be decoded in sorted mode from the raw bytes. (2) Every + field in the schema must specify a non-empty name. (3) Every + field must specify a type with an associated encoding. The + type is limited to scalar types only: Array, Map, Aggregate, + and Struct are not allowed. (4) The field names must not + collide with existing column family names and reserved + keywords "\_key" and "\_timestamp". + + The following update operations are allowed for + row_key_schema: + + - Update from an empty schema to a new schema. + - Remove the existing schema. This operation requires + setting the ``ignore_warnings`` flag to ``true``, since it + might be a backward incompatible change. Without the flag, + the update request will fail with an INVALID_ARGUMENT + error. Any other row key schema update operation (e.g. + update existing schema columns names or types) is + currently unsupported. """ class TimestampGranularity(proto.Enum): @@ -266,6 +346,31 @@ class ReplicationState(proto.Enum): message="EncryptionInfo", ) + class AutomatedBackupPolicy(proto.Message): + r"""Defines an automated backup policy for a table + + Attributes: + retention_period (google.protobuf.duration_pb2.Duration): + Required. How long the automated backups + should be retained. The only supported value at + this time is 3 days. + frequency (google.protobuf.duration_pb2.Duration): + Required. How frequently automated backups + should occur. The only supported value at this + time is 24 hours. + """ + + retention_period: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + frequency: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + name: str = proto.Field( proto.STRING, number=1, @@ -301,6 +406,147 @@ class ReplicationState(proto.Enum): proto.BOOL, number=9, ) + automated_backup_policy: AutomatedBackupPolicy = proto.Field( + proto.MESSAGE, + number=13, + oneof="automated_backup_config", + message=AutomatedBackupPolicy, + ) + tiered_storage_config: "TieredStorageConfig" = proto.Field( + proto.MESSAGE, + number=14, + message="TieredStorageConfig", + ) + row_key_schema: types.Type.Struct = proto.Field( + proto.MESSAGE, + number=15, + message=types.Type.Struct, + ) + + +class AuthorizedView(proto.Message): + r"""AuthorizedViews represent subsets of a particular Cloud + Bigtable table. Users can configure access to each Authorized + View independently from the table and use the existing Data APIs + to access the subset of data. + + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Identifier. The name of this AuthorizedView. Values are of + the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}`` + subset_view (google.cloud.bigtable_admin_v2.types.AuthorizedView.SubsetView): + An AuthorizedView permitting access to an + explicit subset of a Table. + + This field is a member of `oneof`_ ``authorized_view``. + etag (str): + The etag for this AuthorizedView. + If this is provided on update, it must match the + server's etag. The server returns ABORTED error + on a mismatched etag. + deletion_protection (bool): + Set to true to make the AuthorizedView + protected against deletion. The parent Table and + containing Instance cannot be deleted if an + AuthorizedView has this bit set. + """ + + class ResponseView(proto.Enum): + r"""Defines a subset of an AuthorizedView's fields. + + Values: + RESPONSE_VIEW_UNSPECIFIED (0): + Uses the default view for each method as + documented in the request. + NAME_ONLY (1): + Only populates ``name``. + BASIC (2): + Only populates the AuthorizedView's basic metadata. This + includes: name, deletion_protection, etag. + FULL (3): + Populates every fields. + """ + RESPONSE_VIEW_UNSPECIFIED = 0 + NAME_ONLY = 1 + BASIC = 2 + FULL = 3 + + class FamilySubsets(proto.Message): + r"""Subsets of a column family that are included in this + AuthorizedView. + + Attributes: + qualifiers (MutableSequence[bytes]): + Individual exact column qualifiers to be + included in the AuthorizedView. + qualifier_prefixes (MutableSequence[bytes]): + Prefixes for qualifiers to be included in the + AuthorizedView. Every qualifier starting with + one of these prefixes is included in the + AuthorizedView. To provide access to all + qualifiers, include the empty string as a prefix + (""). + """ + + qualifiers: MutableSequence[bytes] = proto.RepeatedField( + proto.BYTES, + number=1, + ) + qualifier_prefixes: MutableSequence[bytes] = proto.RepeatedField( + proto.BYTES, + number=2, + ) + + class SubsetView(proto.Message): + r"""Defines a simple AuthorizedView that is a subset of the + underlying Table. + + Attributes: + row_prefixes (MutableSequence[bytes]): + Row prefixes to be included in the + AuthorizedView. To provide access to all rows, + include the empty string as a prefix (""). + family_subsets (MutableMapping[str, google.cloud.bigtable_admin_v2.types.AuthorizedView.FamilySubsets]): + Map from column family name to the columns in + this family to be included in the + AuthorizedView. + """ + + row_prefixes: MutableSequence[bytes] = proto.RepeatedField( + proto.BYTES, + number=1, + ) + family_subsets: MutableMapping[ + str, "AuthorizedView.FamilySubsets" + ] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message="AuthorizedView.FamilySubsets", + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + subset_view: SubsetView = proto.Field( + proto.MESSAGE, + number=2, + oneof="authorized_view", + message=SubsetView, + ) + etag: str = proto.Field( + proto.STRING, + number=3, + ) + deletion_protection: bool = proto.Field( + proto.BOOL, + number=4, + ) class ColumnFamily(proto.Message): @@ -316,6 +562,20 @@ class ColumnFamily(proto.Message): opportunistically in the background, and so it's possible for reads to return a cell even if it matches the active GC expression for its family. 
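# Illustrative sketch, not part of the generated diff: an AuthorizedView that
# exposes only rows prefixed "user#" plus a subset of column family "profile",
# using the SubsetView/FamilySubsets messages above; names are placeholders.
from google.cloud.bigtable_admin_v2 import types

view = types.AuthorizedView(
    name="projects/p/instances/i/tables/t/authorizedViews/v",
    subset_view=types.AuthorizedView.SubsetView(
        row_prefixes=[b"user#"],
        family_subsets={
            "profile": types.AuthorizedView.FamilySubsets(
                qualifiers=[b"name"],
                qualifier_prefixes=[b"addr_"],
            ),
        },
    ),
    deletion_protection=True,
)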
+ value_type (google.cloud.bigtable_admin_v2.types.Type): + The type of data stored in each of this family's cell + values, including its full encoding. If omitted, the family + only serves raw untyped bytes. + + For now, only the ``Aggregate`` type is supported. + + ``Aggregate`` can only be set at family creation and is + immutable afterwards. + + If ``value_type`` is ``Aggregate``, written data must be + compatible with: + + - ``value_type.input_type`` for ``AddInput`` mutations """ gc_rule: "GcRule" = proto.Field( @@ -323,9 +583,14 @@ class ColumnFamily(proto.Message): number=1, message="GcRule", ) + value_type: types.Type = proto.Field( + proto.MESSAGE, + number=3, + message=types.Type, + ) -class GcRule(proto.Message): +class GcRule(oneof_message.OneofMessage): r"""Rule for determining which cells to delete during garbage collection. @@ -595,13 +860,18 @@ class Backup(proto.Message): this backup was copied. If a backup is not created by copying a backup, this field will be empty. Values are of the form: - projects//instances//backups/. + + projects//instances//clusters//backups/ expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. The expiration time of the backup, with - microseconds granularity that must be at least 6 hours and - at most 90 days from the time the request is received. Once - the ``expire_time`` has passed, Cloud Bigtable will delete - the backup and free the resources used by the backup. + Required. The expiration time of the backup. When creating a + backup or updating its ``expire_time``, the value must be + greater than the backup creation time by: + + - At least 6 hours + - At most 90 days + + Once the ``expire_time`` has passed, Cloud Bigtable will + delete the backup. start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. ``start_time`` is the time that the backup was started (i.e. approximately the time the @@ -619,6 +889,20 @@ class Backup(proto.Message): encryption_info (google.cloud.bigtable_admin_v2.types.EncryptionInfo): Output only. The encryption information for the backup. + backup_type (google.cloud.bigtable_admin_v2.types.Backup.BackupType): + Indicates the backup type of the backup. + hot_to_standard_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the hot backup will be converted to a + standard backup. Once the ``hot_to_standard_time`` has + passed, Cloud Bigtable will convert the hot backup to a + standard backup. This value must be greater than the backup + creation time by: + + - At least 24 hours + + This field only applies for hot backups. When creating or + updating a standard backup, attempting to set this field + will fail the request. """ class State(proto.Enum): @@ -637,6 +921,28 @@ class State(proto.Enum): CREATING = 1 READY = 2 + class BackupType(proto.Enum): + r"""The type of the backup. + + Values: + BACKUP_TYPE_UNSPECIFIED (0): + Not specified. + STANDARD (1): + The default type for Cloud Bigtable managed + backups. Supported for backups created in both + HDD and SSD instances. Requires optimization + when restored to a table in an SSD instance. + HOT (2): + A backup type with faster restore to SSD + performance. Only supported for backups created + in SSD instances. A new SSD table restored from + a hot backup reaches production performance more + quickly than a standard backup. 
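# Illustrative sketch, not part of the generated diff: a HOT backup whose
# timestamps respect the documented minimums (expire_time at least 6 hours,
# hot_to_standard_time at least 24 hours after creation). The `source_table`
# field name is assumed from the pre-existing Backup message, not this hunk.
import datetime
from google.cloud.bigtable_admin_v2 import types

now = datetime.datetime.now(datetime.timezone.utc)
backup = types.Backup(
    source_table="projects/p/instances/i/tables/t",
    backup_type=types.Backup.BackupType.HOT,
    expire_time=now + datetime.timedelta(days=7),
    hot_to_standard_time=now + datetime.timedelta(days=1),
)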
+ """ + BACKUP_TYPE_UNSPECIFIED = 0 + STANDARD = 1 + HOT = 2 + name: str = proto.Field( proto.STRING, number=1, @@ -678,6 +984,16 @@ class State(proto.Enum): number=9, message="EncryptionInfo", ) + backup_type: BackupType = proto.Field( + proto.ENUM, + number=11, + enum=BackupType, + ) + hot_to_standard_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) class BackupInfo(proto.Message): @@ -702,7 +1018,8 @@ class BackupInfo(proto.Message): this backup was copied. If a backup is not created by copying a backup, this field will be empty. Values are of the form: - projects//instances//backups/. + + projects//instances//clusters//backups/ """ backup: str = proto.Field( @@ -729,4 +1046,116 @@ class BackupInfo(proto.Message): ) +class TieredStorageConfig(proto.Message): + r"""Config for tiered storage. + A valid config must have a valid TieredStorageRule. Otherwise + the whole TieredStorageConfig must be unset. + By default all data is stored in the SSD tier (only SSD + instances can configure tiered storage). + + Attributes: + infrequent_access (google.cloud.bigtable_admin_v2.types.TieredStorageRule): + Rule to specify what data is stored in the + infrequent access(IA) tier. The IA tier allows + storing more data per node with reduced + performance. + """ + + infrequent_access: "TieredStorageRule" = proto.Field( + proto.MESSAGE, + number=1, + message="TieredStorageRule", + ) + + +class TieredStorageRule(proto.Message): + r"""Rule to specify what data is stored in a storage tier. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + include_if_older_than (google.protobuf.duration_pb2.Duration): + Include cells older than the given age. + For the infrequent access tier, this value must + be at least 30 days. + + This field is a member of `oneof`_ ``rule``. + """ + + include_if_older_than: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + oneof="rule", + message=duration_pb2.Duration, + ) + + +class ProtoSchema(proto.Message): + r"""Represents a protobuf schema. + + Attributes: + proto_descriptors (bytes): + Required. Contains a protobuf-serialized + `google.protobuf.FileDescriptorSet `__, + which could include multiple proto files. To generate it, + `install `__ and + run ``protoc`` with ``--include_imports`` and + ``--descriptor_set_out``. For example, to generate for + moon/shot/app.proto, run + + :: + + $protoc --proto_path=/app_path --proto_path=/lib_path \ + --include_imports \ + --descriptor_set_out=descriptors.pb \ + moon/shot/app.proto + + For more details, see protobuffer `self + description `__. + """ + + proto_descriptors: bytes = proto.Field( + proto.BYTES, + number=2, + ) + + +class SchemaBundle(proto.Message): + r"""A named collection of related schemas. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Identifier. The unique name identifying this schema bundle. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + proto_schema (google.cloud.bigtable_admin_v2.types.ProtoSchema): + Schema for Protobufs. + + This field is a member of `oneof`_ ``type``. + etag (str): + Optional. The etag for this schema bundle. + This may be sent on update and delete requests + to ensure the client has an up-to-date value + before proceeding. 
The server returns an ABORTED + error on a mismatched etag. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + proto_schema: "ProtoSchema" = proto.Field( + proto.MESSAGE, + number=2, + oneof="type", + message="ProtoSchema", + ) + etag: str = proto.Field( + proto.STRING, + number=3, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/types.py b/google/cloud/bigtable_admin_v2/types/types.py new file mode 100644 index 000000000..4f56429da --- /dev/null +++ b/google/cloud/bigtable_admin_v2/types/types.py @@ -0,0 +1,841 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "Type", + }, +) + + +class Type(proto.Message): + r"""``Type`` represents the type of data that is written to, read from, + or stored in Bigtable. It is heavily based on the GoogleSQL standard + to help maintain familiarity and consistency across products and + features. + + For compatibility with Bigtable's existing untyped APIs, each + ``Type`` includes an ``Encoding`` which describes how to convert to + or from the underlying data. + + Each encoding can operate in one of two modes: + + - Sorted: In this mode, Bigtable guarantees that + ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is + useful anywhere sort order is important, for example when encoding + keys. + - Distinct: In this mode, Bigtable guarantees that if ``X != Y`` + then ``Encode(X) != Encode(Y)``. However, the converse is not + guaranteed. For example, both "{'foo': '1', 'bar': '2'}" and + "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON + value. + + The API clearly documents which mode is used wherever an encoding + can be configured. Each encoding also documents which values are + supported in which modes. For example, when encoding INT64 as a + numeric STRING, negative numbers cannot be encoded in sorted mode. + This is because ``INT64(1) > INT64(-1)``, but + ``STRING("-00001") > STRING("00001")``. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): + Bytes + + This field is a member of `oneof`_ ``kind``. + string_type (google.cloud.bigtable_admin_v2.types.Type.String): + String + + This field is a member of `oneof`_ ``kind``. + int64_type (google.cloud.bigtable_admin_v2.types.Type.Int64): + Int64 + + This field is a member of `oneof`_ ``kind``. 
+ float32_type (google.cloud.bigtable_admin_v2.types.Type.Float32): + Float32 + + This field is a member of `oneof`_ ``kind``. + float64_type (google.cloud.bigtable_admin_v2.types.Type.Float64): + Float64 + + This field is a member of `oneof`_ ``kind``. + bool_type (google.cloud.bigtable_admin_v2.types.Type.Bool): + Bool + + This field is a member of `oneof`_ ``kind``. + timestamp_type (google.cloud.bigtable_admin_v2.types.Type.Timestamp): + Timestamp + + This field is a member of `oneof`_ ``kind``. + date_type (google.cloud.bigtable_admin_v2.types.Type.Date): + Date + + This field is a member of `oneof`_ ``kind``. + aggregate_type (google.cloud.bigtable_admin_v2.types.Type.Aggregate): + Aggregate + + This field is a member of `oneof`_ ``kind``. + struct_type (google.cloud.bigtable_admin_v2.types.Type.Struct): + Struct + + This field is a member of `oneof`_ ``kind``. + array_type (google.cloud.bigtable_admin_v2.types.Type.Array): + Array + + This field is a member of `oneof`_ ``kind``. + map_type (google.cloud.bigtable_admin_v2.types.Type.Map): + Map + + This field is a member of `oneof`_ ``kind``. + proto_type (google.cloud.bigtable_admin_v2.types.Type.Proto): + Proto + + This field is a member of `oneof`_ ``kind``. + enum_type (google.cloud.bigtable_admin_v2.types.Type.Enum): + Enum + + This field is a member of `oneof`_ ``kind``. + """ + + class Bytes(proto.Message): + r"""Bytes Values of type ``Bytes`` are stored in ``Value.bytes_value``. + + Attributes: + encoding (google.cloud.bigtable_admin_v2.types.Type.Bytes.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + raw (google.cloud.bigtable_admin_v2.types.Type.Bytes.Encoding.Raw): + Use ``Raw`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Raw(proto.Message): + r"""Leaves the value as-is. + + Sorted mode: all values are supported. + + Distinct mode: all values are supported. + + """ + + raw: "Type.Bytes.Encoding.Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Bytes.Encoding.Raw", + ) + + encoding: "Type.Bytes.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Bytes.Encoding", + ) + + class String(proto.Message): + r"""String Values of type ``String`` are stored in + ``Value.string_value``. + + Attributes: + encoding (google.cloud.bigtable_admin_v2.types.Type.String.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + utf8_raw (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Raw): + Deprecated: if set, converts to an empty ``utf8_bytes``. + + This field is a member of `oneof`_ ``encoding``. + utf8_bytes (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Bytes): + Use ``Utf8Bytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. 
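# Illustrative sketch, not part of the generated diff: `utf8_raw` above is
# deprecated and collapses to an empty `utf8_bytes`, so new declarations would
# use `Raw` for bytes and `Utf8Bytes` for strings:
from google.cloud.bigtable_admin_v2.types import types

raw_bytes_type = types.Type(
    bytes_type=types.Type.Bytes(
        encoding=types.Type.Bytes.Encoding(raw=types.Type.Bytes.Encoding.Raw())
    )
)
utf8_string_type = types.Type(
    string_type=types.Type.String(
        encoding=types.Type.String.Encoding(
            utf8_bytes=types.Type.String.Encoding.Utf8Bytes()
        )
    )
)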
+ """ + + class Utf8Raw(proto.Message): + r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" + + class Utf8Bytes(proto.Message): + r"""UTF-8 encoding. + + Sorted mode: + + - All values are supported. + - Code point order is preserved. + + Distinct mode: all values are supported. + + Compatible with: + + - BigQuery ``TEXT`` encoding + - HBase ``Bytes.toBytes`` + - Java ``String#getBytes(StandardCharsets.UTF_8)`` + + """ + + utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.String.Encoding.Utf8Raw", + ) + utf8_bytes: "Type.String.Encoding.Utf8Bytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.String.Encoding.Utf8Bytes", + ) + + encoding: "Type.String.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.String.Encoding", + ) + + class Int64(proto.Message): + r"""Int64 Values of type ``Int64`` are stored in ``Value.int_value``. + + Attributes: + encoding (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + big_endian_bytes (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding.BigEndianBytes): + Use ``BigEndianBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + ordered_code_bytes (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding.OrderedCodeBytes): + Use ``OrderedCodeBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class BigEndianBytes(proto.Message): + r"""Encodes the value as an 8-byte big-endian two's complement value. + + Sorted mode: non-negative values are supported. + + Distinct mode: all values are supported. + + Compatible with: + + - BigQuery ``BINARY`` encoding + - HBase ``Bytes.toBytes`` + - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` + + Attributes: + bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): + Deprecated: ignored if set. + """ + + bytes_type: "Type.Bytes" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Bytes", + ) + + class OrderedCodeBytes(proto.Message): + r"""Encodes the value in a variable length binary format of up to + 10 bytes. Values that are closer to zero use fewer bytes. + + Sorted mode: all values are supported. + + Distinct mode: all values are supported. + + """ + + big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Int64.Encoding.BigEndianBytes", + ) + ordered_code_bytes: "Type.Int64.Encoding.OrderedCodeBytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.Int64.Encoding.OrderedCodeBytes", + ) + + encoding: "Type.Int64.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Int64.Encoding", + ) + + class Bool(proto.Message): + r"""bool Values of type ``Bool`` are stored in ``Value.bool_value``.""" + + class Float32(proto.Message): + r"""Float32 Values of type ``Float32`` are stored in + ``Value.float_value``. 
+ + """ + + class Float64(proto.Message): + r"""Float64 Values of type ``Float64`` are stored in + ``Value.float_value``. + + """ + + class Timestamp(proto.Message): + r"""Timestamp Values of type ``Timestamp`` are stored in + ``Value.timestamp_value``. + + Attributes: + encoding (google.cloud.bigtable_admin_v2.types.Type.Timestamp.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + unix_micros_int64 (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding): + Encodes the number of microseconds since the Unix epoch + using the given ``Int64`` encoding. Values must be + microsecond-aligned. + + Compatible with: + + - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS`` + + This field is a member of `oneof`_ ``encoding``. + """ + + unix_micros_int64: "Type.Int64.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Int64.Encoding", + ) + + encoding: "Type.Timestamp.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Timestamp.Encoding", + ) + + class Date(proto.Message): + r"""Date Values of type ``Date`` are stored in ``Value.date_value``.""" + + class Struct(proto.Message): + r"""A structured data value, consisting of fields which map to + dynamically typed values. Values of type ``Struct`` are stored in + ``Value.array_value`` where entries are in the same order and number + as ``field_types``. + + Attributes: + fields (MutableSequence[google.cloud.bigtable_admin_v2.types.Type.Struct.Field]): + The names and types of the fields in this + struct. + encoding (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Field(proto.Message): + r"""A struct field and its type. + + Attributes: + field_name (str): + The field name (optional). Fields without a ``field_name`` + are considered anonymous and cannot be referenced by name. + type_ (google.cloud.bigtable_admin_v2.types.Type): + The type of values in this field. + """ + + field_name: str = proto.Field( + proto.STRING, + number=1, + ) + type_: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + singleton (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.Singleton): + Use ``Singleton`` encoding. + + This field is a member of `oneof`_ ``encoding``. + delimited_bytes (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.DelimitedBytes): + Use ``DelimitedBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + ordered_code_bytes (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.OrderedCodeBytes): + User ``OrderedCodeBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Singleton(proto.Message): + r"""Uses the encoding of ``fields[0].type`` as-is. Only valid if + ``fields.size == 1``. 
+ + """ + + class DelimitedBytes(proto.Message): + r"""Fields are encoded independently and concatenated with a + configurable ``delimiter`` in between. + + A struct with no fields defined is encoded as a single + ``delimiter``. + + Sorted mode: + + - Fields are encoded in sorted mode. + - Encoded field values must not contain any bytes <= + ``delimiter[0]`` + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. + + Distinct mode: + + - Fields are encoded in distinct mode. + - Encoded field values must not contain ``delimiter[0]``. + + Attributes: + delimiter (bytes): + Byte sequence used to delimit concatenated + fields. The delimiter must contain at least 1 + character and at most 50 characters. + """ + + delimiter: bytes = proto.Field( + proto.BYTES, + number=1, + ) + + class OrderedCodeBytes(proto.Message): + r"""Fields are encoded independently and concatenated with the fixed + byte pair {0x00, 0x01} in between. + + Any null (0x00) byte in an encoded field is replaced by the fixed + byte pair {0x00, 0xFF}. + + Fields that encode to the empty string "" have special handling: + + - If *every* field encodes to "", or if the STRUCT has no fields + defined, then the STRUCT is encoded as the fixed byte pair {0x00, + 0x00}. + - Otherwise, the STRUCT only encodes until the last non-empty field, + omitting any trailing empty fields. Any empty fields that aren't + omitted are replaced with the fixed byte pair {0x00, 0x00}. + + Examples: + + - STRUCT() -> "\\00\\00" + - STRUCT("") -> "\\00\\00" + - STRUCT("", "") -> "\\00\\00" + - STRUCT("", "B") -> "\\00\\00" + "\\00\\01" + "B" + - STRUCT("A", "") -> "A" + - STRUCT("", "B", "") -> "\\00\\00" + "\\00\\01" + "B" + - STRUCT("A", "", "C") -> "A" + "\\00\\01" + "\\00\\00" + "\\00\\01" + + "C" + + Since null bytes are always escaped, this encoding can cause size + blowup for encodings like ``Int64.BigEndianBytes`` that are likely + to produce many such bytes. + + Sorted mode: + + - Fields are encoded in sorted mode. + - All values supported by the field encodings are allowed + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. + + Distinct mode: + + - Fields are encoded in distinct mode. + - All values supported by the field encodings are allowed. + + """ + + singleton: "Type.Struct.Encoding.Singleton" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Struct.Encoding.Singleton", + ) + delimited_bytes: "Type.Struct.Encoding.DelimitedBytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.Struct.Encoding.DelimitedBytes", + ) + ordered_code_bytes: "Type.Struct.Encoding.OrderedCodeBytes" = proto.Field( + proto.MESSAGE, + number=3, + oneof="encoding", + message="Type.Struct.Encoding.OrderedCodeBytes", + ) + + fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Type.Struct.Field", + ) + encoding: "Type.Struct.Encoding" = proto.Field( + proto.MESSAGE, + number=2, + message="Type.Struct.Encoding", + ) + + class Proto(proto.Message): + r"""A protobuf message type. Values of type ``Proto`` are stored in + ``Value.bytes_value``. + + Attributes: + schema_bundle_id (str): + The ID of the schema bundle that this proto + is defined in. + message_name (str): + The fully qualified name of the protobuf + message, including package. In the format of + "foo.bar.Message". 
+ """ + + schema_bundle_id: str = proto.Field( + proto.STRING, + number=1, + ) + message_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class Enum(proto.Message): + r"""A protobuf enum type. Values of type ``Enum`` are stored in + ``Value.int_value``. + + Attributes: + schema_bundle_id (str): + The ID of the schema bundle that this enum is + defined in. + enum_name (str): + The fully qualified name of the protobuf enum + message, including package. In the format of + "foo.bar.EnumMessage". + """ + + schema_bundle_id: str = proto.Field( + proto.STRING, + number=1, + ) + enum_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class Array(proto.Message): + r"""An ordered list of elements of a given type. Values of type + ``Array`` are stored in ``Value.array_value``. + + Attributes: + element_type (google.cloud.bigtable_admin_v2.types.Type): + The type of the elements in the array. This must not be + ``Array``. + """ + + element_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + + class Map(proto.Message): + r"""A mapping of keys to values of a given type. Values of type ``Map`` + are stored in a ``Value.array_value`` where each entry is another + ``Value.array_value`` with two elements (the key and the value, in + that order). Normally encoded Map values won't have repeated keys, + however, clients are expected to handle the case in which they do. + If the same key appears multiple times, the *last* value takes + precedence. + + Attributes: + key_type (google.cloud.bigtable_admin_v2.types.Type): + The type of a map key. Only ``Bytes``, ``String``, and + ``Int64`` are allowed as key types. + value_type (google.cloud.bigtable_admin_v2.types.Type): + The type of the values in a map. + """ + + key_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + value_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + + class Aggregate(proto.Message): + r"""A value that combines incremental updates into a summarized value. + + Data is never directly written or read using type ``Aggregate``. + Writes will provide either the ``input_type`` or ``state_type``, and + reads will always return the ``state_type`` . + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + input_type (google.cloud.bigtable_admin_v2.types.Type): + Type of the inputs that are accumulated by this + ``Aggregate``, which must specify a full encoding. Use + ``AddInput`` mutations to accumulate new inputs. + state_type (google.cloud.bigtable_admin_v2.types.Type): + Output only. Type that holds the internal accumulator state + for the ``Aggregate``. This is a function of the + ``input_type`` and ``aggregator`` chosen, and will always + specify a full encoding. + sum (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Sum): + Sum aggregator. + + This field is a member of `oneof`_ ``aggregator``. + hllpp_unique_count (google.cloud.bigtable_admin_v2.types.Type.Aggregate.HyperLogLogPlusPlusUniqueCount): + HyperLogLogPlusPlusUniqueCount aggregator. + + This field is a member of `oneof`_ ``aggregator``. + max_ (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Max): + Max aggregator. + + This field is a member of `oneof`_ ``aggregator``. 
+ min_ (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Min): + Min aggregator. + + This field is a member of `oneof`_ ``aggregator``. + """ + + class Sum(proto.Message): + r"""Computes the sum of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class Max(proto.Message): + r"""Computes the max of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class Min(proto.Message): + r"""Computes the min of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class HyperLogLogPlusPlusUniqueCount(proto.Message): + r"""Computes an approximate unique count over the input values. When + using raw data as input, be careful to use a consistent encoding. + Otherwise the same value encoded differently could count more than + once, or two distinct values could count as identical. Input: Any, + or omit for Raw State: TBD Special state conversions: ``Int64`` (the + unique count estimate) + + """ + + input_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + state_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + sum: "Type.Aggregate.Sum" = proto.Field( + proto.MESSAGE, + number=4, + oneof="aggregator", + message="Type.Aggregate.Sum", + ) + hllpp_unique_count: "Type.Aggregate.HyperLogLogPlusPlusUniqueCount" = ( + proto.Field( + proto.MESSAGE, + number=5, + oneof="aggregator", + message="Type.Aggregate.HyperLogLogPlusPlusUniqueCount", + ) + ) + max_: "Type.Aggregate.Max" = proto.Field( + proto.MESSAGE, + number=6, + oneof="aggregator", + message="Type.Aggregate.Max", + ) + min_: "Type.Aggregate.Min" = proto.Field( + proto.MESSAGE, + number=7, + oneof="aggregator", + message="Type.Aggregate.Min", + ) + + bytes_type: Bytes = proto.Field( + proto.MESSAGE, + number=1, + oneof="kind", + message=Bytes, + ) + string_type: String = proto.Field( + proto.MESSAGE, + number=2, + oneof="kind", + message=String, + ) + int64_type: Int64 = proto.Field( + proto.MESSAGE, + number=5, + oneof="kind", + message=Int64, + ) + float32_type: Float32 = proto.Field( + proto.MESSAGE, + number=12, + oneof="kind", + message=Float32, + ) + float64_type: Float64 = proto.Field( + proto.MESSAGE, + number=9, + oneof="kind", + message=Float64, + ) + bool_type: Bool = proto.Field( + proto.MESSAGE, + number=8, + oneof="kind", + message=Bool, + ) + timestamp_type: Timestamp = proto.Field( + proto.MESSAGE, + number=10, + oneof="kind", + message=Timestamp, + ) + date_type: Date = proto.Field( + proto.MESSAGE, + number=11, + oneof="kind", + message=Date, + ) + aggregate_type: Aggregate = proto.Field( + proto.MESSAGE, + number=6, + oneof="kind", + message=Aggregate, + ) + struct_type: Struct = proto.Field( + proto.MESSAGE, + number=7, + oneof="kind", + message=Struct, + ) + array_type: Array = proto.Field( + proto.MESSAGE, + number=3, + oneof="kind", + message=Array, + ) + map_type: Map = proto.Field( + proto.MESSAGE, + number=4, + oneof="kind", + message=Map, + ) + proto_type: Proto = proto.Field( + proto.MESSAGE, + number=13, + oneof="kind", + message=Proto, + ) + enum_type: Enum = proto.Field( + proto.MESSAGE, + number=14, + oneof="kind", + message=Enum, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/utils/__init__.py b/google/cloud/bigtable_admin_v2/utils/__init__.py new file mode 100644 index 000000000..93d766056 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/utils/__init__.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This directory is a directory for handwritten code, made for inserting +# specifically the oneof_message module into files in the autogenerated +# types directory without causing ImportErrors due to circular imports. +# For other use cases, use the overlay submodule. diff --git a/google/cloud/bigtable_admin_v2/utils/oneof_message.py b/google/cloud/bigtable_admin_v2/utils/oneof_message.py new file mode 100644 index 000000000..e110d8fa6 --- /dev/null +++ b/google/cloud/bigtable_admin_v2/utils/oneof_message.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +import collections.abc +import proto + + +class OneofMessage(proto.Message): + def _get_oneof_field_from_key(self, key): + """Given a field name, return the corresponding oneof associated with it. If it doesn't exist, return None.""" + + oneof_type = None + + try: + oneof_type = self._meta.fields[key].oneof + except KeyError: + # Underscores may be appended to field names + # that collide with python or proto-plus keywords. + # In case a key only exists with a `_` suffix, coerce the key + # to include the `_` suffix. It's not possible to + # natively define the same field with a trailing underscore in protobuf. + # See related issue + # https://github.com/googleapis/python-api-core/issues/227 + if f"{key}_" in self._meta.fields: + key = f"{key}_" + oneof_type = self._meta.fields[key].oneof + + return oneof_type + + def __init__( + self, + mapping=None, + *, + ignore_unknown_fields=False, + **kwargs, + ): + # We accept several things for `mapping`: + # * An instance of this class. + # * An instance of the underlying protobuf descriptor class. + # * A dict + # * Nothing (keyword arguments only). + # + # + # Check for oneofs collisions in the parameters provided. Extract a set of + # all fields that are set from the mappings + kwargs combined. + mapping_fields = set(kwargs.keys()) + + if mapping is None: + pass + elif isinstance(mapping, collections.abc.Mapping): + mapping_fields.update(mapping.keys()) + elif isinstance(mapping, self._meta.pb): + mapping_fields.update(field.name for field, _ in mapping.ListFields()) + elif isinstance(mapping, type(self)): + mapping_fields.update(field.name for field, _ in mapping._pb.ListFields()) + else: + # Sanity check: Did we get something not a map? Error if so. 
+ raise TypeError( + "Invalid constructor input for %s: %r" + % ( + self.__class__.__name__, + mapping, + ) + ) + + oneofs = set() + + for field in mapping_fields: + oneof_field = self._get_oneof_field_from_key(field) + if oneof_field is not None: + if oneof_field in oneofs: + raise ValueError( + "Invalid constructor input for %s: Multiple fields defined for oneof %s" + % (self.__class__.__name__, oneof_field) + ) + else: + oneofs.add(oneof_field) + + super().__init__(mapping, ignore_unknown_fields=ignore_unknown_fields, **kwargs) + + def __setattr__(self, key, value): + # Oneof check: Only set the value of an existing oneof field + # if the field being overridden is the same as the field already set + # for the oneof. + oneof = self._get_oneof_field_from_key(key) + if ( + oneof is not None + and self._pb.HasField(oneof) + and self._pb.WhichOneof(oneof) != key + ): + raise ValueError( + "Overriding the field set for oneof %s with a different field %s" + % (oneof, key) + ) + super().__setattr__(key, value) diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py index 80bd4ec09..ec552a85d 100644 --- a/google/cloud/bigtable_v2/__init__.py +++ b/google/cloud/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,14 +15,26 @@ # from google.cloud.bigtable_v2 import gapic_version as package_version +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .services.bigtable import BigtableClient from .services.bigtable import BigtableAsyncClient from .types.bigtable import CheckAndMutateRowRequest from .types.bigtable import CheckAndMutateRowResponse +from .types.bigtable import ExecuteQueryRequest +from .types.bigtable import ExecuteQueryResponse from .types.bigtable import GenerateInitialChangeStreamPartitionsRequest from .types.bigtable import GenerateInitialChangeStreamPartitionsResponse from .types.bigtable import MutateRowRequest @@ -31,6 +43,8 @@ from .types.bigtable import MutateRowsResponse from .types.bigtable import PingAndWarmRequest from .types.bigtable import PingAndWarmResponse +from .types.bigtable import PrepareQueryRequest +from .types.bigtable import PrepareQueryResponse from .types.bigtable import RateLimitInfo from .types.bigtable import ReadChangeStreamRequest from .types.bigtable import ReadChangeStreamResponse @@ -40,12 +54,21 @@ from .types.bigtable import ReadRowsResponse from .types.bigtable import SampleRowKeysRequest from .types.bigtable import SampleRowKeysResponse +from .types.data import ArrayValue from .types.data import Cell from .types.data import Column +from .types.data import ColumnMetadata from .types.data import ColumnRange from .types.data import Family +from .types.data import Idempotency from .types.data import Mutation +from .types.data import PartialResultSet +from .types.data import ProtoFormat +from .types.data import ProtoRows +from .types.data import ProtoRowsBatch +from .types.data import ProtoSchema from .types.data import ReadModifyWriteRule +from .types.data import ResultSetMetadata from 
.types.data import Row from .types.data import RowFilter from .types.data import RowRange @@ -54,34 +77,144 @@ from .types.data import StreamContinuationTokens from .types.data import StreamPartition from .types.data import TimestampRange +from .types.data import Value from .types.data import ValueRange from .types.feature_flags import FeatureFlags +from .types.peer_info import PeerInfo from .types.request_stats import FullReadStatsView from .types.request_stats import ReadIterationStats from .types.request_stats import RequestLatencyStats from .types.request_stats import RequestStats from .types.response_params import ResponseParams +from .types.types import Type + +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.bigtable_v2") # type: ignore + api_core.check_dependency_versions("google.cloud.bigtable_v2") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. + try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = "google.cloud.bigtable_v2" + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. + break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. 
Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." + + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. To continue receiving " + + "updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) __all__ = ( "BigtableAsyncClient", + "ArrayValue", "BigtableClient", "Cell", "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", "Column", + "ColumnMetadata", "ColumnRange", + "ExecuteQueryRequest", + "ExecuteQueryResponse", "Family", "FeatureFlags", "FullReadStatsView", "GenerateInitialChangeStreamPartitionsRequest", "GenerateInitialChangeStreamPartitionsResponse", + "Idempotency", "MutateRowRequest", "MutateRowResponse", "MutateRowsRequest", "MutateRowsResponse", "Mutation", + "PartialResultSet", + "PeerInfo", "PingAndWarmRequest", "PingAndWarmResponse", + "PrepareQueryRequest", + "PrepareQueryResponse", + "ProtoFormat", + "ProtoRows", + "ProtoRowsBatch", + "ProtoSchema", "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", @@ -94,6 +227,7 @@ "RequestLatencyStats", "RequestStats", "ResponseParams", + "ResultSetMetadata", "Row", "RowFilter", "RowRange", @@ -104,5 +238,7 @@ "StreamContinuationTokens", "StreamPartition", "TimestampRange", + "Type", + "Value", "ValueRange", ) diff --git a/google/cloud/bigtable_v2/gapic_metadata.json b/google/cloud/bigtable_v2/gapic_metadata.json index 181dc8ff5..83504fbc1 100644 --- a/google/cloud/bigtable_v2/gapic_metadata.json +++ b/google/cloud/bigtable_v2/gapic_metadata.json @@ -15,6 +15,11 @@ "check_and_mutate_row" ] }, + "ExecuteQuery": { + "methods": [ + "execute_query" + ] + }, "GenerateInitialChangeStreamPartitions": { "methods": [ "generate_initial_change_stream_partitions" @@ -35,6 +40,11 @@ "ping_and_warm" ] }, + "PrepareQuery": { + "methods": [ + "prepare_query" + ] + }, "ReadChangeStream": { "methods": [ "read_change_stream" @@ -65,6 +75,11 @@ "check_and_mutate_row" ] }, + "ExecuteQuery": { + "methods": [ + "execute_query" + ] + }, "GenerateInitialChangeStreamPartitions": { "methods": [ "generate_initial_change_stream_partitions" @@ -85,6 +100,11 @@ "ping_and_warm" ] }, + "PrepareQuery": { + "methods": [ + "prepare_query" + ] + }, "ReadChangeStream": { "methods": [ "read_change_stream" @@ -115,6 +135,11 @@ "check_and_mutate_row" ] }, + "ExecuteQuery": { + "methods": [ + "execute_query" + ] + }, "GenerateInitialChangeStreamPartitions": { "methods": [ "generate_initial_change_stream_partitions" @@ -135,6 +160,11 @@ "ping_and_warm" ] }, + "PrepareQuery": { + "methods": [ + "prepare_query" + ] + }, "ReadChangeStream": { "methods": [ "read_change_stream" diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py index e546bae05..6d72a226d 100644 --- a/google/cloud/bigtable_v2/gapic_version.py +++ b/google/cloud/bigtable_v2/gapic_version.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the 
"License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.21.0" # {x-release-please-version} +__version__ = "2.35.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_v2/services/__init__.py b/google/cloud/bigtable_v2/services/__init__.py index 89a37dc92..cbf94b283 100644 --- a/google/cloud/bigtable_v2/services/__init__.py +++ b/google/cloud/bigtable_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/__init__.py b/google/cloud/bigtable_v2/services/bigtable/__init__.py index f10a68e5b..c74141156 100644 --- a/google/cloud/bigtable_v2/services/bigtable/__init__.py +++ b/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 07a782d0c..0a9442287 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,11 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging as std_logging from collections import OrderedDict -import functools import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -35,22 +36,34 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf + try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from google.cloud.bigtable_v2.types import request_stats +from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport from .client import BigtableClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class BigtableAsyncClient: """Service for reading from and writing to existing Bigtable @@ -59,11 +72,21 @@ class BigtableAsyncClient: _client: BigtableClient + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BigtableClient._DEFAULT_UNIVERSE + authorized_view_path = staticmethod(BigtableClient.authorized_view_path) + parse_authorized_view_path = staticmethod(BigtableClient.parse_authorized_view_path) instance_path = staticmethod(BigtableClient.instance_path) parse_instance_path = staticmethod(BigtableClient.parse_instance_path) + materialized_view_path = staticmethod(BigtableClient.materialized_view_path) + parse_materialized_view_path = staticmethod( + BigtableClient.parse_materialized_view_path + ) table_path = staticmethod(BigtableClient.table_path) parse_table_path = staticmethod(BigtableClient.parse_table_path) common_billing_account_path = staticmethod( @@ -161,19 +184,38 @@ def transport(self) -> BigtableTransport: """ return self._client.transport - get_transport_class = functools.partial( - type(BigtableClient).get_transport_class, type(BigtableClient) - ) + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + + get_transport_class = BigtableClient.get_transport_class def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableTransport] = "grpc_asyncio", + transport: Optional[ + Union[str, BigtableTransport, Callable[..., BigtableTransport]] + ] = "grpc_asyncio", client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable client. + """Instantiates the bigtable async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -181,26 +223,43 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.BigtableTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + transport (Optional[Union[str,BigtableTransport,Callable[..., BigtableTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. @@ -212,6 +271,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable_v2.BigtableAsyncClient`.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.v2.Bigtable", + "credentialsType": None, + }, + ) + def read_rows( self, request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, @@ -220,7 +301,7 @@ def read_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: r"""Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to @@ -234,8 +315,10 @@ def read_rows( The request object. Request message for Bigtable.ReadRows. table_name (:class:`str`): - Required. The unique name of the table from which to - read. Values are of the form + Optional. The unique name of the table from which to + read. + + Values are of the form ``projects//instances//tables/
``.

                This corresponds to the ``table_name`` field
@@ -250,11 +333,13 @@ def read_rows(
             This corresponds to the ``app_profile_id`` field
             on the ``request`` instance; if ``request`` is provided, this
             should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                 should be retried.
             timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.

         Returns:
             AsyncIterable[google.cloud.bigtable_v2.types.ReadRowsResponse]:
@@ -263,16 +348,22 @@ def read_rows(
         """
         # Create or coerce a protobuf request object.
-        # Quick check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([table_name, app_profile_id])
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [table_name, app_profile_id]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
         if request is not None and has_flattened_params:
             raise ValueError(
                 "If the `request` argument is set, then none of "
                 "the individual field arguments should be set."
             )

-        request = bigtable.ReadRowsRequest(request)
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, bigtable.ReadRowsRequest):
+            request = bigtable.ReadRowsRequest(request)

         # If we have keyword arguments corresponding to fields on the
         # request, apply these.
@@ -283,19 +374,43 @@ def read_rows(

         # Wrap the RPC method; this adds retry and timeout information,
         # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.read_rows,
-            default_timeout=43200.0,
-            client_info=DEFAULT_CLIENT_INFO,
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.read_rows
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile(
+            "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$"
         )
+        regex_match = routing_param_regex.match(request.table_name)
+        if regex_match and regex_match.group("table_name"):
+            header_params["table_name"] = regex_match.group("table_name")

-        # Certain fields should be provided within the metadata header;
-        # add these here.
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$" ) + regex_match = routing_param_regex.match(request.materialized_view_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() # Send the request. response = rpc( @@ -316,7 +431,7 @@ def sample_row_keys( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: r"""Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of @@ -329,8 +444,10 @@ def sample_row_keys( The request object. Request message for Bigtable.SampleRowKeys. table_name (:class:`str`): - Required. The unique name of the table from which to - sample row keys. Values are of the form + Optional. The unique name of the table from which to + sample row keys. + + Values are of the form ``projects//instances//tables/
``.

                This corresponds to the ``table_name`` field
@@ -345,11 +462,13 @@ def sample_row_keys(
             This corresponds to the ``app_profile_id`` field
             on the ``request`` instance; if ``request`` is provided, this
             should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                 should be retried.
             timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.

         Returns:
             AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]:
@@ -358,16 +477,22 @@ def sample_row_keys(
         """
         # Create or coerce a protobuf request object.
-        # Quick check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([table_name, app_profile_id])
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [table_name, app_profile_id]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
         if request is not None and has_flattened_params:
             raise ValueError(
                 "If the `request` argument is set, then none of "
                 "the individual field arguments should be set."
             )

-        request = bigtable.SampleRowKeysRequest(request)
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, bigtable.SampleRowKeysRequest):
+            request = bigtable.SampleRowKeysRequest(request)

         # If we have keyword arguments corresponding to fields on the
         # request, apply these.
@@ -378,19 +503,43 @@ def sample_row_keys(

         # Wrap the RPC method; this adds retry and timeout information,
         # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.sample_row_keys,
-            default_timeout=60.0,
-            client_info=DEFAULT_CLIENT_INFO,
+        rpc = self._client._transport._wrapped_methods[
+            self._client._transport.sample_row_keys
+        ]
+
+        header_params = {}
+
+        routing_param_regex = re.compile(
+            "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$"
        )
+        regex_match = routing_param_regex.match(request.table_name)
+        if regex_match and regex_match.group("table_name"):
+            header_params["table_name"] = regex_match.group("table_name")

-        # Certain fields should be provided within the metadata header;
-        # add these here.
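Across all of these data-plane methods the pattern being introduced is the same: build `header_params` by validating resource names against anchored regexes, then always attach `app_profile_id`, even when it is an empty string. The removed unconditional-metadata lines of this hunk continue below; as a standalone sketch of the new logic (a hypothetical helper, not part of the generated client):

```python
import re

_TABLE_NAME_RE = re.compile(
    r"^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$"
)


def routing_header_params(table_name: str, app_profile_id: str) -> dict:
    params = {}
    match = _TABLE_NAME_RE.match(table_name)
    if match:  # only well-formed table names are propagated
        params["table_name"] = match.group("table_name")
    # app_profile_id is always attached, even as an empty string
    params["app_profile_id"] = app_profile_id
    return params


assert routing_header_params("projects/p/instances/i/tables/t", "") == {
    "table_name": "projects/p/instances/i/tables/t",
    "app_profile_id": "",
}
assert routing_header_params("not-a-table-name", "ap") == {"app_profile_id": "ap"}
```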
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$" ) + regex_match = routing_param_regex.match(request.materialized_view_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() # Send the request. response = rpc( @@ -413,7 +562,7 @@ async def mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -423,8 +572,10 @@ async def mutate_row( The request object. Request message for Bigtable.MutateRow. table_name (:class:`str`): - Required. The unique name of the table to which the - mutation should be applied. Values are of the form + Optional. The unique name of the table to which the + mutation should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -457,11 +608,13 @@ async def mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.MutateRowResponse: @@ -470,16 +623,22 @@ async def mutate_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, row_key, mutations, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable.MutateRowRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.MutateRowRequest): + request = bigtable.MutateRowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -494,29 +653,36 @@ async def mutate_row( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.mutate_row, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, + rpc = self._client._transport._wrapped_methods[ + self._client._transport.mutate_row + ] + + header_params = {} + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() # Send the request. response = await rpc( @@ -538,7 +704,7 @@ def mutate_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: r"""Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire @@ -549,9 +715,11 @@ def mutate_rows( The request object. Request message for BigtableService.MutateRows. table_name (:class:`str`): - Required. The unique name of the - table to which the mutations should be - applied. + Optional. The unique name of the table to which the + mutations should be applied. + + Values are of the form + ``projects//instances//tables/
``. This corresponds to the ``table_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -579,11 +747,13 @@ def mutate_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: @@ -592,16 +762,22 @@ def mutate_rows( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, entries, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, entries, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable.MutateRowsRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.MutateRowsRequest): + request = bigtable.MutateRowsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -614,19 +790,36 @@ def mutate_rows( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.mutate_rows, - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, + rpc = self._client._transport._wrapped_methods[ + self._client._transport.mutate_rows + ] + + header_params = {} + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() # Send the request. response = rpc( @@ -651,7 +844,7 @@ async def check_and_mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a predicate Reader filter. @@ -661,9 +854,10 @@ async def check_and_mutate_row( The request object. Request message for Bigtable.CheckAndMutateRow. table_name (:class:`str`): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of - the form + Optional. The unique name of the table to which the + conditional mutation should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -718,11 +912,13 @@ async def check_and_mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: @@ -731,17 +927,18 @@ async def check_and_mutate_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [ - table_name, - row_key, - predicate_filter, - true_mutations, - false_mutations, - app_profile_id, - ] + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [ + table_name, + row_key, + predicate_filter, + true_mutations, + false_mutations, + app_profile_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -749,7 +946,10 @@ async def check_and_mutate_row( "the individual field arguments should be set." ) - request = bigtable.CheckAndMutateRowRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.CheckAndMutateRowRequest): + request = bigtable.CheckAndMutateRowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -768,19 +968,36 @@ async def check_and_mutate_row( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_and_mutate_row, - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, + rpc = self._client._transport._wrapped_methods[ + self._client._transport.check_and_mutate_row + ] + + header_params = {} + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() # Send the request. response = await rpc( @@ -801,7 +1018,7 @@ async def ping_and_warm( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this connection. This call is not required but may be useful @@ -828,11 +1045,13 @@ async def ping_and_warm( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.PingAndWarmResponse: @@ -842,16 +1061,22 @@ async def ping_and_warm( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable.PingAndWarmRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.PingAndWarmRequest): + request = bigtable.PingAndWarmRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -862,17 +1087,27 @@ async def ping_and_warm( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
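The hunk that follows, like the earlier ones, swaps the per-call `gapic_v1.method_async.wrap_method(...)` for a lookup into the transport's precomputed `_wrapped_methods` table, so each RPC's default retry and timeout are configured once at transport construction instead of on every invocation. Roughly, as a sketch of the pattern (not the real transport code; `SketchTransport` is hypothetical):

```python
from google.api_core import gapic_v1


class SketchTransport:
    """Sketch of the precomputed-wrapper pattern used by the real transports."""

    def __init__(self, client_info):
        # Wrap each RPC once, at construction, with its default timeout/retry.
        self._wrapped_methods = {
            self.ping_and_warm: gapic_v1.method_async.wrap_method(
                self.ping_and_warm,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    async def ping_and_warm(self, request, **kwargs):
        ...  # the raw gRPC stub call would live here


# Per call, the client then only does a dict lookup, keyed by the bound method:
#     rpc = transport._wrapped_methods[transport.ping_and_warm]
```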
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.ping_and_warm, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.ping_and_warm + ] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) + header_params = {} + + routing_param_regex = re.compile("^(?Pprojects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() # Send the request. response = await rpc( @@ -895,7 +1130,7 @@ async def read_modify_write_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the @@ -910,9 +1145,10 @@ async def read_modify_write_row( The request object. Request message for Bigtable.ReadModifyWriteRow. table_name (:class:`str`): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of - the form + Optional. The unique name of the table to which the + read/modify/write rules should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -932,7 +1168,9 @@ async def read_modify_write_row( transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later - ones. + ones. At least one entry must be + specified, and there can be at most + 100000 rules. This corresponds to the ``rules`` field on the ``request`` instance; if ``request`` is provided, this @@ -946,11 +1184,13 @@ async def read_modify_write_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: @@ -959,16 +1199,22 @@ async def read_modify_write_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, row_key, rules, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable.ReadModifyWriteRowRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.ReadModifyWriteRowRequest): + request = bigtable.ReadModifyWriteRowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -983,19 +1229,36 @@ async def read_modify_write_row( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_modify_write_row, - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, + rpc = self._client._transport._wrapped_methods[ + self._client._transport.read_modify_write_row + ] + + header_params = {} + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() # Send the request. response = await rpc( @@ -1018,14 +1281,15 @@ def generate_initial_change_stream_partitions( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[ AsyncIterable[bigtable.GenerateInitialChangeStreamPartitionsResponse] ]: - r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. - Returns the current list of partitions that make up the table's + r"""Returns the current list of partitions that make up the table's change stream. The union of partitions will cover the entire keyspace. Partitions can be read with ``ReadChangeStream``. + NOTE: This API is only intended to be used by Apache Beam + BigtableIO. Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): @@ -1052,11 +1316,13 @@ def generate_initial_change_stream_partitions( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: @@ -1067,16 +1333,24 @@ def generate_initial_change_stream_partitions( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) - request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable.GenerateInitialChangeStreamPartitionsRequest + ): + request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1087,11 +1361,9 @@ def generate_initial_change_stream_partitions( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.generate_initial_change_stream_partitions, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.generate_initial_change_stream_partitions + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1101,6 +1373,9 @@ def generate_initial_change_stream_partitions( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1120,12 +1395,13 @@ def read_change_stream( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + r"""Reads changes from a table's change stream. Changes + will reflect both user-initiated mutations and mutations + that are caused by garbage collection. + NOTE: This API is only intended to be used by Apache + Beam BigtableIO. Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): @@ -1151,11 +1427,13 @@ def read_change_stream( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: @@ -1165,16 +1443,22 @@ def read_change_stream( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
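The reworked quick check that follows counts an argument as "set" only when it is not None, so falsy-but-meaningful values such as an empty string are no longer missed. A small sketch of the difference between the old `any()`-based check and the new one:

```python
table_name, app_profile_id = "", None  # "" is a deliberate value, not "unset"

# Old check: any() treats "" as unset, so no conflict with `request` is detected.
assert any([table_name, app_profile_id]) is False

# New check: only None means "unset", so "" still counts as a set argument.
flattened_params = [table_name, app_profile_id]
assert len([p for p in flattened_params if p is not None]) > 0
```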
+ flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = bigtable.ReadChangeStreamRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.ReadChangeStreamRequest): + request = bigtable.ReadChangeStreamRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1185,11 +1469,9 @@ def read_change_stream( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_change_stream, - default_timeout=43200.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.read_change_stream + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1199,6 +1481,247 @@ def read_change_stream( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def prepare_query( + self, + request: Optional[Union[bigtable.PrepareQueryRequest, dict]] = None, + *, + instance_name: Optional[str] = None, + query: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable.PrepareQueryResponse: + r"""Prepares a GoogleSQL query for execution on a + particular Bigtable instance. + + Args: + request (Optional[Union[google.cloud.bigtable_v2.types.PrepareQueryRequest, dict]]): + The request object. Request message for + Bigtable.PrepareQuery + instance_name (:class:`str`): + Required. The unique name of the instance against which + the query should be executed. Values are of the form + ``projects//instances/`` + + This corresponds to the ``instance_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (:class:`str`): + Required. The query string. + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + Optional. This value specifies routing for preparing the + query. Note that this ``app_profile_id`` is only used + for preparing the query. The actual query execution will + use the app profile specified in the + ``ExecuteQueryRequest``. If not specified, the + ``default`` application profile will be used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_v2.types.PrepareQueryResponse: + Response message for + Bigtable.PrepareQueryResponse + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [instance_name, query, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.PrepareQueryRequest): + request = bigtable.PrepareQueryRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if instance_name is not None: + request.instance_name = instance_name + if query is not None: + request.query = query + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.prepare_query + ] + + header_params = {} + + routing_param_regex = re.compile("^(?P<name>projects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.instance_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def execute_query( + self, + request: Optional[Union[bigtable.ExecuteQueryRequest, dict]] = None, + *, + instance_name: Optional[str] = None, + query: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> Awaitable[AsyncIterable[bigtable.ExecuteQueryResponse]]: + r"""Executes a SQL query against a particular Bigtable + instance. + + Args: + request (Optional[Union[google.cloud.bigtable_v2.types.ExecuteQueryRequest, dict]]): + The request object. Request message for + Bigtable.ExecuteQuery + instance_name (:class:`str`): + Required. The unique name of the instance against which + the query should be executed. Values are of the form + ``projects/<project>/instances/<instance>`` + + This corresponds to the ``instance_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (:class:`str`): + Required. The query string. + + Exactly one of ``query`` and ``prepared_query`` is + required. Setting both or neither is an + ``INVALID_ARGUMENT``.
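An aside on the widened metadata type that recurs throughout this diff: values are plain strings except for keys carrying the `-bin` suffix, which must hold raw bytes. A hedged sketch (the key names are illustrative only, not part of the Bigtable API):

```python
from typing import Sequence, Tuple, Union

metadata: Sequence[Tuple[str, Union[str, bytes]]] = (
    ("x-goog-request-params", "app_profile_id=default"),  # str value
    ("custom-trace-bin", b"\x0a\x0b\x0c"),  # "-bin" suffix => bytes value
)
```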
+ + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + Optional. This value specifies routing for replication. + If not specified, the ``default`` application profile + will be used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.ExecuteQueryResponse]: + Response message for + Bigtable.ExecuteQuery + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [instance_name, query, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.ExecuteQueryRequest): + request = bigtable.ExecuteQueryRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if instance_name is not None: + request.instance_name = instance_name + if query is not None: + request.query = query + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.execute_query + ] + + header_params = {} + + routing_param_regex = re.compile("^(?P<name>projects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.instance_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if True: # always attach app_profile_id, even if empty string + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request.
response = rpc( request, @@ -1221,5 +1744,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("BigtableAsyncClient",) diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index db393faa7..5eb6ba894 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,10 +14,14 @@ # limitations under the License. # from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging import os import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -29,6 +33,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_v2 import gapic_version as package_version @@ -41,15 +46,26 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from google.cloud.bigtable_v2.types import request_stats +from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableGrpcTransport from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport @@ -126,11 +142,43 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = "bigtable.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtable.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
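`_use_client_cert_effective` prefers the newer google-auth helper and only parses the environment variable itself when that helper is missing. A standalone sketch of just the fallback branch, assuming a google-auth version without `mtls.should_use_client_cert`:

```python
import os

def use_client_cert_from_env() -> bool:
    # Fallback only: mirrors the env-var branch of _use_client_cert_effective().
    value = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false").lower()
    if value not in ("true", "false"):
        raise ValueError(
            "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
            " either `true` or `false`"
        )
    return value == "true"

print(use_client_cert_from_env())  # False unless the variable is set to "true"
```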
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -178,6 +226,30 @@ def transport(self) -> BigtableTransport: """ return self._transport + @staticmethod + def authorized_view_path( + project: str, + instance: str, + table: str, + authorized_view: str, + ) -> str: + """Returns a fully-qualified authorized_view string.""" + return "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( + project=project, + instance=instance, + table=table, + authorized_view=authorized_view, + ) + + @staticmethod + def parse_authorized_view_path(path: str) -> Dict[str, str]: + """Parses a authorized_view path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)/authorizedViews/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def instance_path( project: str, @@ -195,6 +267,28 @@ def parse_instance_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) return m.groupdict() if m else {} + @staticmethod + def materialized_view_path( + project: str, + instance: str, + materialized_view: str, + ) -> str: + """Returns a fully-qualified materialized_view string.""" + return "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( + project=project, + instance=instance, + materialized_view=materialized_view, + ) + + @staticmethod + def parse_materialized_view_path(path: str) -> Dict[str, str]: + """Parses a materialized_view path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/materializedViews/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def table_path( project: str, @@ -298,7 +392,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -328,14 +422,15 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = BigtableClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -343,7 +438,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -361,11 +456,172 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
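With the named groups restored in the parse helpers above, the build/parse pair round-trips cleanly. A quick sanity check (the IDs are placeholders):

```python
from google.cloud.bigtable_v2 import BigtableClient

path = BigtableClient.authorized_view_path("p1", "i1", "t1", "v1")
assert path == "projects/p1/instances/i1/tables/t1/authorizedViews/v1"
assert BigtableClient.parse_authorized_view_path(path) == {
    "project": "p1",
    "instance": "i1",
    "table": "t1",
    "authorized_view": "v1",
}
# Non-matching paths parse to an empty dict rather than raising.
assert BigtableClient.parse_authorized_view_path("not/a/view") == {}
```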
+ """ + use_client_cert = BigtableClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert, use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BigtableClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = BigtableClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = BigtableClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. 
+ """ + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True + + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableTransport]] = None, + transport: Optional[ + Union[str, BigtableTransport, Callable[..., BigtableTransport]] + ] = None, client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -377,25 +633,37 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, BigtableTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + transport (Optional[Union[str,BigtableTransport,Callable[..., BigtableTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. @@ -406,17 +674,38 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BigtableClient._read_environment_variables() + self._client_cert_source = BigtableClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert ) + self._universe_domain = BigtableClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False - api_key_value = getattr(client_options, "api_key", None) + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -425,20 +714,30 @@ def __init__( # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. - if isinstance(transport, BigtableTransport): + transport_provided = isinstance(transport, BigtableTransport) + if transport_provided: # transport is a BigtableTransport instance. - if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." 
) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) - self._transport = transport - else: + self._transport = cast(BigtableTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = self._api_endpoint or BigtableClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -448,19 +747,49 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) - self._transport = Transport( + transport_init: Union[ + Type[BigtableTransport], Callable[..., BigtableTransport] + ] = ( + BigtableClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., BigtableTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable_v2.BigtableClient`.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.v2.Bigtable", + "credentialsType": None, + }, + ) + def read_rows( self, request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, @@ -469,7 +798,7 @@ def read_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ReadRowsResponse]: r"""Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to @@ -483,8 +812,10 @@ def read_rows( The request object. Request message for Bigtable.ReadRows. table_name (str): - Required. The unique name of the table from which to - read. Values are of the form + Optional. The unique name of the table from which to + read. + + Values are of the form ``projects//instances//tables/
<table>``. This corresponds to the ``table_name`` field @@ -502,8 +833,10 @@ def read_rows( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: @@ -512,19 +845,20 @@ def read_rows( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadRowsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadRowsRequest): request = bigtable.ReadRowsRequest(request) # If we have keyword arguments corresponding to fields on the @@ -547,14 +881,31 @@ def read_rows( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + routing_param_regex = re.compile( + "^(?P<name>projects/[^/]+/instances/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.materialized_view_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -574,7 +925,7 @@ def sample_row_keys( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.SampleRowKeysResponse]: r"""Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of @@ -587,8 +938,10 @@ def sample_row_keys( The request object. Request message for Bigtable.SampleRowKeys. table_name (str): - Required.
The unique name of the table from which to - sample row keys. Values are of the form + Optional. The unique name of the table from which to + sample row keys. + + Values are of the form + ``projects/<project>/instances/<instance>/tables/
<table>``. This corresponds to the ``table_name`` field @@ -606,8 +959,10 @@ def sample_row_keys( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: @@ -616,19 +971,20 @@ def sample_row_keys( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.SampleRowKeysRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.SampleRowKeysRequest): request = bigtable.SampleRowKeysRequest(request) # If we have keyword arguments corresponding to fields on the @@ -651,14 +1007,31 @@ def sample_row_keys( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + routing_param_regex = re.compile( + "^(?P<name>projects/[^/]+/instances/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.materialized_view_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -680,7 +1053,7 @@ def mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -690,8 +1063,10 @@ def mutate_row( The request object. Request message for Bigtable.MutateRow.
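The routing blocks above all funnel into `gapic_v1.routing_header.to_grpc_metadata`, which folds the collected `header_params` into a single `x-goog-request-params` metadata entry. A small sketch of that step in isolation:

```python
from google.api_core import gapic_v1

header_params = {
    "table_name": "projects/p1/instances/i1/tables/t1",
    "app_profile_id": "",  # attached even when empty, per the change above
}
key, value = gapic_v1.routing_header.to_grpc_metadata(header_params)
assert key == "x-goog-request-params"
# `value` is the urlencoded "table_name=...&app_profile_id=" routing string;
# the tuple is appended to the user-supplied metadata before the RPC is sent.
```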
table_name (str): - Required. The unique name of the table to which the - mutation should be applied. Values are of the form + Optional. The unique name of the table to which the + mutation should be applied. + + Values are of the form + ``projects/<project>/instances/<instance>/tables/
<table>``. This corresponds to the ``table_name`` field @@ -727,8 +1102,10 @@ def mutate_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.MutateRowResponse: @@ -737,19 +1114,20 @@ def mutate_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + flattened_params = [table_name, row_key, mutations, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.MutateRowRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.MutateRowRequest): request = bigtable.MutateRowRequest(request) # If we have keyword arguments corresponding to fields on the @@ -776,14 +1154,24 @@ def mutate_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -804,7 +1192,7 @@ def mutate_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.MutateRowsResponse]: r"""Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire @@ -815,9 +1203,11 @@ def mutate_rows( The request object. Request message for BigtableService.MutateRows. table_name (str): - Required. The unique name of the - table to which the mutations should be - applied. + Optional. The unique name of the table to which the + mutations should be applied. + + Values are of the form + ``projects/<project>/instances/<instance>/tables/
<table>``. This corresponds to the ``table_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -848,8 +1238,10 @@ def mutate_rows( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: @@ -858,19 +1250,20 @@ def mutate_rows( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, entries, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, entries, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.MutateRowsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.MutateRowsRequest): request = bigtable.MutateRowsRequest(request) # If we have keyword arguments corresponding to fields on the @@ -895,14 +1288,24 @@ def mutate_rows( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -926,7 +1329,7 @@ def check_and_mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a predicate Reader filter. @@ -936,9 +1339,10 @@ def check_and_mutate_row( The request object. Request message for Bigtable.CheckAndMutateRow. table_name (str): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of - the form + Optional.
The unique name of the table to which the + conditional mutation should be applied. + + Values are of the form + ``projects/<project>/instances/<instance>/tables/
<table>``. This corresponds to the ``table_name`` field @@ -996,8 +1400,10 @@ def check_and_mutate_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: @@ -1006,17 +1412,18 @@ def check_and_mutate_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [ - table_name, - row_key, - predicate_filter, - true_mutations, - false_mutations, - app_profile_id, - ] + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [ + table_name, + row_key, + predicate_filter, + true_mutations, + false_mutations, + app_profile_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -1024,10 +1431,8 @@ def check_and_mutate_row( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.CheckAndMutateRowRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.CheckAndMutateRowRequest): request = bigtable.CheckAndMutateRowRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1058,14 +1463,24 @@ def check_and_mutate_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1085,7 +1500,7 @@ def ping_and_warm( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this connection. This call is not required but may be useful @@ -1115,8 +1530,10 @@ def ping_and_warm( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request.
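For context, PingAndWarm is the one instance-scoped RPC in this set; a hedged usage sketch (placeholder IDs, assumes default credentials are available):

```python
from google.cloud.bigtable_v2 import BigtableClient

client = BigtableClient()
response = client.ping_and_warm(name="projects/my-project/instances/my-instance")
# The response carries no payload; the call simply primes instance metadata
# for this connection.
```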
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.PingAndWarmResponse: @@ -1126,19 +1543,20 @@ def ping_and_warm( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.PingAndWarmRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.PingAndWarmRequest): request = bigtable.PingAndWarmRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1159,7 +1577,7 @@ def ping_and_warm( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id if header_params: @@ -1167,6 +1585,9 @@ def ping_and_warm( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1188,7 +1609,7 @@ def read_modify_write_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the @@ -1203,9 +1624,10 @@ def read_modify_write_row( The request object. Request message for Bigtable.ReadModifyWriteRow. table_name (str): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of - the form + Optional. The unique name of the table to which the + read/modify/write rules should be applied. + + Values are of the form ``projects//instances//tables/
<table>``. This corresponds to the ``table_name`` field @@ -1225,7 +1647,9 @@ def read_modify_write_row( transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later - ones. + ones. At least one entry must be + specified, and there can be at most + 100000 rules. This corresponds to the ``rules`` field on the ``request`` instance; if ``request`` is provided, this @@ -1242,8 +1666,10 @@ def read_modify_write_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: @@ -1252,19 +1678,20 @@ def read_modify_write_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, row_key, rules, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadModifyWriteRowRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadModifyWriteRowRequest): request = bigtable.ReadModifyWriteRowRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1291,14 +1718,24 @@ def read_modify_write_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request.
response = rpc( request, @@ -1320,12 +1757,13 @@ def generate_initial_change_stream_partitions( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]: - r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. - Returns the current list of partitions that make up the table's + r"""Returns the current list of partitions that make up the table's change stream. The union of partitions will cover the entire keyspace. Partitions can be read with ``ReadChangeStream``. + NOTE: This API is only intended to be used by Apache Beam + BigtableIO. Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): @@ -1355,8 +1793,10 @@ def generate_initial_change_stream_partitions( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: @@ -1367,19 +1807,20 @@ def generate_initial_change_stream_partitions( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.GenerateInitialChangeStreamPartitionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance( request, bigtable.GenerateInitialChangeStreamPartitionsRequest ): @@ -1405,6 +1846,9 @@ def generate_initial_change_stream_partitions( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1424,12 +1868,13 @@ def read_change_stream( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ReadChangeStreamResponse]: - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. 
- Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + r"""Reads changes from a table's change stream. Changes + will reflect both user-initiated mutations and mutations + that are caused by garbage collection. + NOTE: This API is only intended to be used by Apache + Beam BigtableIO. Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): @@ -1458,8 +1903,10 @@ def read_change_stream( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: @@ -1469,19 +1916,20 @@ def read_change_stream( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadChangeStreamRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadChangeStreamRequest): request = bigtable.ReadChangeStreamRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1503,6 +1951,241 @@ def read_change_stream( ), ) + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def prepare_query( + self, + request: Optional[Union[bigtable.PrepareQueryRequest, dict]] = None, + *, + instance_name: Optional[str] = None, + query: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable.PrepareQueryResponse: + r"""Prepares a GoogleSQL query for execution on a + particular Bigtable instance. + + Args: + request (Union[google.cloud.bigtable_v2.types.PrepareQueryRequest, dict]): + The request object. Request message for + Bigtable.PrepareQuery + instance_name (str): + Required. The unique name of the instance against which + the query should be executed. 
Values are of the form
+                ``projects/<project>/instances/<instance>``
+
+                This corresponds to the ``instance_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            query (str):
+                Required. The query string.
+                This corresponds to the ``query`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            app_profile_id (str):
+                Optional. This value specifies routing for preparing the
+                query. Note that this ``app_profile_id`` is only used
+                for preparing the query. The actual query execution will
+                use the app profile specified in the
+                ``ExecuteQueryRequest``. If not specified, the
+                ``default`` application profile will be used.
+
+                This corresponds to the ``app_profile_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.bigtable_v2.types.PrepareQueryResponse:
+                Response message for
+                Bigtable.PrepareQueryResponse
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [instance_name, query, app_profile_id]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, bigtable.PrepareQueryRequest):
+            request = bigtable.PrepareQueryRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if instance_name is not None:
+                request.instance_name = instance_name
+            if query is not None:
+                request.query = query
+            if app_profile_id is not None:
+                request.app_profile_id = app_profile_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.prepare_query]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<name>projects/[^/]+/instances/[^/]+)$")
+        regex_match = routing_param_regex.match(request.instance_name)
+        if regex_match and regex_match.group("name"):
+            header_params["name"] = regex_match.group("name")
+
+        if True:  # always attach app_profile_id, even if empty string
+            header_params["app_profile_id"] = request.app_profile_id
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
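Editorial note: the routing-header logic above can be read in isolation. The regex pulls the `projects/<project>/instances/<instance>` prefix out of `instance_name`, and the resulting dict is URL-encoded into the `x-goog-request-params` gRPC metadata entry. A runnable sketch (resource names are hypothetical; the generated code delegates the final encoding to `gapic_v1.routing_header.to_grpc_metadata`):

```python
import re
from urllib.parse import urlencode

routing_param_regex = re.compile(r"^(?P<name>projects/[^/]+/instances/[^/]+)$")

def routing_metadata(instance_name: str, app_profile_id: str = ""):
    header_params = {}
    match = routing_param_regex.match(instance_name)
    if match and match.group("name"):
        header_params["name"] = match.group("name")
    header_params["app_profile_id"] = app_profile_id  # attached even if empty
    return ("x-goog-request-params", urlencode(header_params))

print(routing_metadata("projects/my-project/instances/my-instance"))
# ('x-goog-request-params',
#  'name=projects%2Fmy-project%2Finstances%2Fmy-instance&app_profile_id=')
```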
+        return response
+
+    def execute_query(
+        self,
+        request: Optional[Union[bigtable.ExecuteQueryRequest, dict]] = None,
+        *,
+        instance_name: Optional[str] = None,
+        query: Optional[str] = None,
+        app_profile_id: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> Iterable[bigtable.ExecuteQueryResponse]:
+        r"""Executes a SQL query against a particular Bigtable
+        instance.
+
+        Args:
+            request (Union[google.cloud.bigtable_v2.types.ExecuteQueryRequest, dict]):
+                The request object. Request message for
+                Bigtable.ExecuteQuery
+            instance_name (str):
+                Required. The unique name of the instance against which
+                the query should be executed. Values are of the form
+                ``projects/<project>/instances/<instance>``
+
+                This corresponds to the ``instance_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            query (str):
+                Required. The query string.
+
+                Exactly one of ``query`` and ``prepared_query`` is
+                required. Setting both or neither is an
+                ``INVALID_ARGUMENT``.
+
+                This corresponds to the ``query`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            app_profile_id (str):
+                Optional. This value specifies routing for replication.
+                If not specified, the ``default`` application profile
+                will be used.
+
+                This corresponds to the ``app_profile_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            Iterable[google.cloud.bigtable_v2.types.ExecuteQueryResponse]:
+                Response message for
+                Bigtable.ExecuteQuery
+
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [instance_name, query, app_profile_id]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(request, bigtable.ExecuteQueryRequest):
+            request = bigtable.ExecuteQueryRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if instance_name is not None:
+                request.instance_name = instance_name
+            if query is not None:
+                request.query = query
+            if app_profile_id is not None:
+                request.app_profile_id = app_profile_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
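Editorial note: the `isinstance`/coercion step above leans on proto-plus message constructors, which accept a dict, another message of the same type, or nothing at all. A small sketch (field values are hypothetical):

```python
# Proto-plus messages coerce dicts and copy same-type messages, which is
# what lets the generated method accept request as either a dict or a
# bigtable.ExecuteQueryRequest.
from google.cloud.bigtable_v2.types import bigtable

req_from_dict = bigtable.ExecuteQueryRequest(
    {"instance_name": "projects/p/instances/i", "query": "SELECT 1"}
)
req_copy = bigtable.ExecuteQueryRequest(req_from_dict)  # copies the message
assert req_copy.query == "SELECT 1"
```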
+        rpc = self._transport._wrapped_methods[self._transport.execute_query]
+
+        header_params = {}
+
+        routing_param_regex = re.compile("^(?P<name>projects/[^/]+/instances/[^/]+)$")
+        regex_match = routing_param_regex.match(request.instance_name)
+        if regex_match and regex_match.group("name"):
+            header_params["name"] = regex_match.group("name")
+
+        if True:  # always attach app_profile_id, even if empty string
+            header_params["app_profile_id"] = request.app_profile_id
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
         # Send the request.
         response = rpc(
             request,
@@ -1532,5 +2215,7 @@ def __exit__(self, type, value, traceback):
     gapic_version=package_version.__version__
 )

+if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"):  # pragma: NO COVER
+    DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__

 __all__ = ("BigtableClient",)
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/README.rst b/google/cloud/bigtable_v2/services/bigtable/transports/README.rst
new file mode 100644
index 000000000..254812cd3
--- /dev/null
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/README.rst
@@ -0,0 +1,9 @@
+
+transport inheritance structure
+_______________________________
+
+`BigtableTransport` is the ABC for all transports.
+- public child `BigtableGrpcTransport` for sync gRPC transport (defined in `grpc.py`).
+- public child `BigtableGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`).
+- private child `_BaseBigtableRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`).
+- public child `BigtableRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`).
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py
index c09443bc2..b35e85534 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
+# Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
index b580bbca7..f08bca73e 100644
--- a/google/cloud/bigtable_v2/services/bigtable/transports/base.py
+++ b/google/cloud/bigtable_v2/services/bigtable/transports/base.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
+# Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
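Editorial note: the `hasattr` guard on `DEFAULT_CLIENT_INFO` is feature detection. Older google-api-core releases ship a `ClientInfo` without `protobuf_runtime_version`, so the attribute is only set when it exists. The same pattern in isolation (the version string is a placeholder):

```python
# Only record the protobuf runtime version when the installed
# google-api-core ClientInfo actually defines the field, keeping the
# code compatible with older api-core releases.
from google.api_core import gapic_v1
import google.protobuf

client_info = gapic_v1.client_info.ClientInfo(gapic_version="0.0.0")
if hasattr(client_info, "protobuf_runtime_version"):
    client_info.protobuf_runtime_version = google.protobuf.__version__
```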
@@ -25,6 +25,7 @@ from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.bigtable_v2.types import bigtable @@ -32,6 +33,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableTransport(abc.ABC): """Abstract transport class for Bigtable.""" @@ -64,15 +68,16 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -89,6 +94,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -101,7 +108,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) @@ -127,6 +134,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
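Editorial note: the constructor above resolves credentials in a fixed order: explicit `credentials`, then `credentials_file`, then Application Default Credentials, with the new `_ignore_credentials` flag short-circuiting the ADC lookup when a channel instance was supplied. A simplified sketch of that precedence (the real transport raises a dedicated google-api-core exception rather than `ValueError` for the mutually-exclusive case):

```python
import google.auth

def resolve_credentials(credentials=None, credentials_file=None,
                        ignore_credentials=False, **scopes_kwargs):
    # 1. Explicit credentials win; combining both arguments is an error.
    if credentials and credentials_file:
        raise ValueError("'credentials_file' and 'credentials' are mutually exclusive")
    # 2. Fall back to a credentials file.
    if credentials_file:
        credentials, _ = google.auth.load_credentials_from_file(
            credentials_file, **scopes_kwargs
        )
    # 3. Fall back to ADC, unless a channel instance already set
    #    _ignore_credentials on the transport.
    elif credentials is None and not ignore_credentials:
        credentials, _ = google.auth.default(**scopes_kwargs)
    return credentials
```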
self._wrapped_methods = { @@ -185,6 +196,26 @@ def _prep_wrapped_messages(self, client_info): default_timeout=43200.0, client_info=client_info, ), + self.prepare_query: gapic_v1.method.wrap_method( + self.prepare_query, + default_timeout=None, + client_info=client_info, + ), + self.execute_query: gapic_v1.method.wrap_method( + self.execute_query, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=43200.0, + ), + default_timeout=43200.0, + client_info=client_info, + ), } def close(self): @@ -291,6 +322,24 @@ def read_change_stream( ]: raise NotImplementedError() + @property + def prepare_query( + self, + ) -> Callable[ + [bigtable.PrepareQueryRequest], + Union[bigtable.PrepareQueryResponse, Awaitable[bigtable.PrepareQueryResponse]], + ]: + raise NotImplementedError() + + @property + def execute_query( + self, + ) -> Callable[ + [bigtable.ExecuteQueryRequest], + Union[bigtable.ExecuteQueryResponse, Awaitable[bigtable.ExecuteQueryResponse]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 8ba04e761..8ddbf15a2 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
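Editorial note: the `execute_query` retry policy registered above (`initial=0.01`, `multiplier=2`, `maximum=60.0`, `deadline=43200.0`) implies a capped exponential backoff. A sketch of the nominal schedule, ignoring the jitter that google-api-core layers on top:

```python
# Nominal (pre-jitter) sleep schedule for the execute_query retry policy.
def nominal_backoff(initial=0.01, multiplier=2.0, maximum=60.0, attempts=14):
    delay = initial
    for _ in range(attempts):
        yield min(delay, maximum)
        delay *= multiplier

print([round(d, 2) for d in nominal_backoff()])
# [0.01, 0.02, 0.04, 0.08, ..., 20.48, 40.96, 60.0]
# The 43200.0s deadline (12 hours) bounds the total retry window.
```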
# +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -21,12 +24,89 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_v2.types import bigtable from .base import BigtableTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableGrpcTransport(BigtableTransport): """gRPC backend transport for Bigtable. 
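Editorial note: the interceptor only logs when two gates pass: the optional `client_logging` import succeeds (which requires a sufficiently recent google-api-core) and this module's logger is enabled for DEBUG. Assuming both hold, standard logging configuration is enough to surface the request/response records; the logger name below is derived from the module path in this diff:

```python
import logging

# Surface the gRPC request/response records emitted by
# _LoggingClientInterceptor (the logger name mirrors the module __name__).
logging.basicConfig(level=logging.DEBUG)
logging.getLogger(
    "google.cloud.bigtable_v2.services.bigtable.transports.grpc"
).setLevel(logging.DEBUG)
```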
@@ -51,7 +131,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -65,20 +145,24 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -88,11 +172,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -118,9 +202,10 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None @@ -159,7 +244,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -175,7 +262,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -196,9 +288,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -254,7 +347,7 @@ def read_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_rows" not in self._stubs: - self._stubs["read_rows"] = self.grpc_channel.unary_stream( + self._stubs["read_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadRows", request_serializer=bigtable.ReadRowsRequest.serialize, response_deserializer=bigtable.ReadRowsResponse.deserialize, @@ -284,7 +377,7 @@ def sample_row_keys( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "sample_row_keys" not in self._stubs: - self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + self._stubs["sample_row_keys"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/SampleRowKeys", request_serializer=bigtable.SampleRowKeysRequest.serialize, response_deserializer=bigtable.SampleRowKeysResponse.deserialize, @@ -311,7 +404,7 @@ def mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_row" not in self._stubs: - self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/MutateRow", request_serializer=bigtable.MutateRowRequest.serialize, response_deserializer=bigtable.MutateRowResponse.deserialize, @@ -339,7 +432,7 @@ def mutate_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
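Editorial note: with `channel` widened to also accept a callable, a channel factory can stand in for `create_channel`; the transport invokes it with the same arguments it would pass to `create_channel`. A hypothetical sketch that points the sync transport at a local emulator (the endpoint and the use of `AnonymousCredentials` are illustrative, not prescribed by this diff):

```python
import grpc
from google.auth.credentials import AnonymousCredentials
from google.cloud.bigtable_v2.services.bigtable.transports import (
    BigtableGrpcTransport,
)

def make_emulator_channel(host, **kwargs):
    # Ignore the credentials/options the transport passes in and connect
    # insecurely to a hypothetical local emulator instead.
    return grpc.insecure_channel("localhost:8086")

# Credentials are still resolved when a callable (not a channel instance)
# is given, so anonymous credentials avoid an ADC lookup here.
transport = BigtableGrpcTransport(
    channel=make_emulator_channel,
    credentials=AnonymousCredentials(),
)
```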
if "mutate_rows" not in self._stubs: - self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + self._stubs["mutate_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/MutateRows", request_serializer=bigtable.MutateRowsRequest.serialize, response_deserializer=bigtable.MutateRowsResponse.deserialize, @@ -368,7 +461,7 @@ def check_and_mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "check_and_mutate_row" not in self._stubs: - self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["check_and_mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/CheckAndMutateRow", request_serializer=bigtable.CheckAndMutateRowRequest.serialize, response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, @@ -396,7 +489,7 @@ def ping_and_warm( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "ping_and_warm" not in self._stubs: - self._stubs["ping_and_warm"] = self.grpc_channel.unary_unary( + self._stubs["ping_and_warm"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/PingAndWarm", request_serializer=bigtable.PingAndWarmRequest.serialize, response_deserializer=bigtable.PingAndWarmResponse.deserialize, @@ -430,7 +523,7 @@ def read_modify_write_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_modify_write_row" not in self._stubs: - self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + self._stubs["read_modify_write_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, @@ -447,10 +540,11 @@ def generate_initial_change_stream_partitions( r"""Return a callable for the generate initial change stream partitions method over gRPC. - NOTE: This API is intended to be used by Apache Beam BigtableIO. Returns the current list of partitions that make up the table's change stream. The union of partitions will cover the entire keyspace. Partitions can be read with ``ReadChangeStream``. + NOTE: This API is only intended to be used by Apache Beam + BigtableIO. Returns: Callable[[~.GenerateInitialChangeStreamPartitionsRequest], @@ -465,7 +559,7 @@ def generate_initial_change_stream_partitions( if "generate_initial_change_stream_partitions" not in self._stubs: self._stubs[ "generate_initial_change_stream_partitions" - ] = self.grpc_channel.unary_stream( + ] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, @@ -480,10 +574,11 @@ def read_change_stream( ]: r"""Return a callable for the read change stream method over gRPC. - NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + Reads changes from a table's change stream. Changes + will reflect both user-initiated mutations and mutations + that are caused by garbage collection. + NOTE: This API is only intended to be used by Apache + Beam BigtableIO. 
Returns: Callable[[~.ReadChangeStreamRequest], @@ -496,15 +591,69 @@ def read_change_stream( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_change_stream" not in self._stubs: - self._stubs["read_change_stream"] = self.grpc_channel.unary_stream( + self._stubs["read_change_stream"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadChangeStream", request_serializer=bigtable.ReadChangeStreamRequest.serialize, response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, ) return self._stubs["read_change_stream"] + @property + def prepare_query( + self, + ) -> Callable[[bigtable.PrepareQueryRequest], bigtable.PrepareQueryResponse]: + r"""Return a callable for the prepare query method over gRPC. + + Prepares a GoogleSQL query for execution on a + particular Bigtable instance. + + Returns: + Callable[[~.PrepareQueryRequest], + ~.PrepareQueryResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "prepare_query" not in self._stubs: + self._stubs["prepare_query"] = self._logged_channel.unary_unary( + "/google.bigtable.v2.Bigtable/PrepareQuery", + request_serializer=bigtable.PrepareQueryRequest.serialize, + response_deserializer=bigtable.PrepareQueryResponse.deserialize, + ) + return self._stubs["prepare_query"] + + @property + def execute_query( + self, + ) -> Callable[[bigtable.ExecuteQueryRequest], bigtable.ExecuteQueryResponse]: + r"""Return a callable for the execute query method over gRPC. + + Executes a SQL query against a particular Bigtable + instance. + + Returns: + Callable[[~.ExecuteQueryRequest], + ~.ExecuteQueryResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_query" not in self._stubs: + self._stubs["execute_query"] = self._logged_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ExecuteQuery", + request_serializer=bigtable.ExecuteQueryRequest.serialize, + response_deserializer=bigtable.ExecuteQueryResponse.deserialize, + ) + return self._stubs["execute_query"] + def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 2c0cbdad6..3e6b70832 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,21 +13,106 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_v2.types import bigtable from .base import BigtableTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableGrpcAsyncIOTransport(BigtableTransport): """gRPC AsyncIO backend transport for Bigtable. @@ -64,9 +149,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -96,7 +181,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -110,21 +195,25 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. + channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -134,11 +223,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -164,9 +253,10 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None @@ -204,7 +294,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -220,7 +312,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -257,7 +355,7 @@ def read_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_rows" not in self._stubs: - self._stubs["read_rows"] = self.grpc_channel.unary_stream( + self._stubs["read_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadRows", request_serializer=bigtable.ReadRowsRequest.serialize, response_deserializer=bigtable.ReadRowsResponse.deserialize, @@ -289,7 +387,7 @@ def sample_row_keys( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "sample_row_keys" not in self._stubs: - self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + self._stubs["sample_row_keys"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/SampleRowKeys", request_serializer=bigtable.SampleRowKeysRequest.serialize, response_deserializer=bigtable.SampleRowKeysResponse.deserialize, @@ -316,7 +414,7 @@ def mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_row" not in self._stubs: - self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/MutateRow", request_serializer=bigtable.MutateRowRequest.serialize, response_deserializer=bigtable.MutateRowResponse.deserialize, @@ -344,7 +442,7 @@ def mutate_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_rows" not in self._stubs: - self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + self._stubs["mutate_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/MutateRows", request_serializer=bigtable.MutateRowsRequest.serialize, response_deserializer=bigtable.MutateRowsResponse.deserialize, @@ -374,7 +472,7 @@ def check_and_mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "check_and_mutate_row" not in self._stubs: - self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["check_and_mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/CheckAndMutateRow", request_serializer=bigtable.CheckAndMutateRowRequest.serialize, response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, @@ -404,7 +502,7 @@ def ping_and_warm( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "ping_and_warm" not in self._stubs: - self._stubs["ping_and_warm"] = self.grpc_channel.unary_unary( + self._stubs["ping_and_warm"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/PingAndWarm", request_serializer=bigtable.PingAndWarmRequest.serialize, response_deserializer=bigtable.PingAndWarmResponse.deserialize, @@ -439,7 +537,7 @@ def read_modify_write_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_modify_write_row" not in self._stubs: - self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + self._stubs["read_modify_write_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, @@ -456,10 +554,11 @@ def generate_initial_change_stream_partitions( r"""Return a callable for the generate initial change stream partitions method over gRPC. - NOTE: This API is intended to be used by Apache Beam BigtableIO. Returns the current list of partitions that make up the table's change stream. The union of partitions will cover the entire keyspace. Partitions can be read with ``ReadChangeStream``. + NOTE: This API is only intended to be used by Apache Beam + BigtableIO. Returns: Callable[[~.GenerateInitialChangeStreamPartitionsRequest], @@ -474,7 +573,7 @@ def generate_initial_change_stream_partitions( if "generate_initial_change_stream_partitions" not in self._stubs: self._stubs[ "generate_initial_change_stream_partitions" - ] = self.grpc_channel.unary_stream( + ] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, @@ -489,10 +588,11 @@ def read_change_stream( ]: r"""Return a callable for the read change stream method over gRPC. - NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + Reads changes from a table's change stream. Changes + will reflect both user-initiated mutations and mutations + that are caused by garbage collection. + NOTE: This API is only intended to be used by Apache + Beam BigtableIO. Returns: Callable[[~.ReadChangeStreamRequest], @@ -505,15 +605,162 @@ def read_change_stream( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "read_change_stream" not in self._stubs: - self._stubs["read_change_stream"] = self.grpc_channel.unary_stream( + self._stubs["read_change_stream"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadChangeStream", request_serializer=bigtable.ReadChangeStreamRequest.serialize, response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, ) return self._stubs["read_change_stream"] + @property + def prepare_query( + self, + ) -> Callable[ + [bigtable.PrepareQueryRequest], Awaitable[bigtable.PrepareQueryResponse] + ]: + r"""Return a callable for the prepare query method over gRPC. + + Prepares a GoogleSQL query for execution on a + particular Bigtable instance. + + Returns: + Callable[[~.PrepareQueryRequest], + Awaitable[~.PrepareQueryResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "prepare_query" not in self._stubs: + self._stubs["prepare_query"] = self._logged_channel.unary_unary( + "/google.bigtable.v2.Bigtable/PrepareQuery", + request_serializer=bigtable.PrepareQueryRequest.serialize, + response_deserializer=bigtable.PrepareQueryResponse.deserialize, + ) + return self._stubs["prepare_query"] + + @property + def execute_query( + self, + ) -> Callable[ + [bigtable.ExecuteQueryRequest], Awaitable[bigtable.ExecuteQueryResponse] + ]: + r"""Return a callable for the execute query method over gRPC. + + Executes a SQL query against a particular Bigtable + instance. + + Returns: + Callable[[~.ExecuteQueryRequest], + Awaitable[~.ExecuteQueryResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "execute_query" not in self._stubs: + self._stubs["execute_query"] = self._logged_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ExecuteQuery", + request_serializer=bigtable.ExecuteQueryRequest.serialize, + response_deserializer=bigtable.ExecuteQueryResponse.deserialize, + ) + return self._stubs["execute_query"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.read_rows: self._wrap_method( + self.read_rows, + default_timeout=43200.0, + client_info=client_info, + ), + self.sample_row_keys: self._wrap_method( + self.sample_row_keys, + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_row: self._wrap_method( + self.mutate_row, + default_retry=retries.AsyncRetry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_rows: self._wrap_method( + self.mutate_rows, + default_timeout=600.0, + client_info=client_info, + ), + self.check_and_mutate_row: self._wrap_method( + self.check_and_mutate_row, + default_timeout=20.0, + client_info=client_info, + ), + self.ping_and_warm: self._wrap_method( + self.ping_and_warm, + default_timeout=None, + client_info=client_info, + ), + self.read_modify_write_row: self._wrap_method( + self.read_modify_write_row, + default_timeout=20.0, + client_info=client_info, + ), + self.generate_initial_change_stream_partitions: self._wrap_method( + self.generate_initial_change_stream_partitions, + default_timeout=60.0, + client_info=client_info, + ), + self.read_change_stream: self._wrap_method( + self.read_change_stream, + default_timeout=43200.0, + client_info=client_info, + ), + self.prepare_query: self._wrap_method( + self.prepare_query, + default_timeout=None, + client_info=client_info, + ), + self.execute_query: self._wrap_method( + self.execute_query, + default_retry=retries.AsyncRetry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=43200.0, + ), + default_timeout=43200.0, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc_asyncio" __all__ = ("BigtableGrpcAsyncIOTransport",) diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 31d230f94..f0a761a36 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,43 +13,55 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries from google.api_core import rest_helpers from google.api_core import rest_streaming -from google.api_core import path_template from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format + from requests import __version__ as requests_version import dataclasses -import re from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings + +from google.cloud.bigtable_v2.types import bigtable + + +from .rest_base import _BaseBigtableRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore -from google.cloud.bigtable_v2.types import bigtable - -from .base import BigtableTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, grpc_version=None, - rest_version=requests_version, + rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableRestInterceptor: """Interceptor for Bigtable. 
@@ -74,6 +86,14 @@ def post_check_and_mutate_row(self, response): logging.log(f"Received response: {response}") return response + def pre_execute_query(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_execute_query(self, response): + logging.log(f"Received response: {response}") + return response + def pre_generate_initial_change_stream_partitions(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -106,6 +126,14 @@ def post_ping_and_warm(self, response): logging.log(f"Received response: {response}") return response + def pre_prepare_query(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_prepare_query(self, response): + logging.log(f"Received response: {response}") + return response + def pre_read_change_stream(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -147,8 +175,10 @@ def post_sample_row_keys(self, response): def pre_check_and_mutate_row( self, request: bigtable.CheckAndMutateRowRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for check_and_mutate_row Override in a subclass to manipulate the request or metadata @@ -161,18 +191,92 @@ def post_check_and_mutate_row( ) -> bigtable.CheckAndMutateRowResponse: """Post-rpc interceptor for check_and_mutate_row - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_check_and_mutate_row_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. This `post_check_and_mutate_row` interceptor runs + before the `post_check_and_mutate_row_with_metadata` interceptor. + """ + return response + + def post_check_and_mutate_row_with_metadata( + self, + response: bigtable.CheckAndMutateRowResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.CheckAndMutateRowResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for check_and_mutate_row + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_check_and_mutate_row_with_metadata` + interceptor in new development instead of the `post_check_and_mutate_row` interceptor. + When both interceptors are used, this `post_check_and_mutate_row_with_metadata` interceptor runs after the + `post_check_and_mutate_row` interceptor. The (possibly modified) response returned by + `post_check_and_mutate_row` will be passed to + `post_check_and_mutate_row_with_metadata`. + """ + return response, metadata + + def pre_execute_query( + self, + request: bigtable.ExecuteQueryRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.ExecuteQueryRequest, Sequence[Tuple[str, Union[str, bytes]]]]: + """Pre-rpc interceptor for execute_query + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. 
+ """ + return request, metadata + + def post_execute_query( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for execute_query + + DEPRECATED. Please use the `post_execute_query_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_execute_query` interceptor runs + before the `post_execute_query_with_metadata` interceptor. """ return response + def post_execute_query_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for execute_query + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_execute_query_with_metadata` + interceptor in new development instead of the `post_execute_query` interceptor. + When both interceptors are used, this `post_execute_query_with_metadata` interceptor runs after the + `post_execute_query` interceptor. The (possibly modified) response returned by + `post_execute_query` will be passed to + `post_execute_query_with_metadata`. + """ + return response, metadata + def pre_generate_initial_change_stream_partitions( self, request: bigtable.GenerateInitialChangeStreamPartitionsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable.GenerateInitialChangeStreamPartitionsRequest, Sequence[Tuple[str, str]] + bigtable.GenerateInitialChangeStreamPartitionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for generate_initial_change_stream_partitions @@ -186,15 +290,42 @@ def post_generate_initial_change_stream_partitions( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for generate_initial_change_stream_partitions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_generate_initial_change_stream_partitions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_generate_initial_change_stream_partitions` interceptor runs + before the `post_generate_initial_change_stream_partitions_with_metadata` interceptor. """ return response + def post_generate_initial_change_stream_partitions_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for generate_initial_change_stream_partitions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_generate_initial_change_stream_partitions_with_metadata` + interceptor in new development instead of the `post_generate_initial_change_stream_partitions` interceptor. 
+ When both interceptors are used, this `post_generate_initial_change_stream_partitions_with_metadata` interceptor runs after the + `post_generate_initial_change_stream_partitions` interceptor. The (possibly modified) response returned by + `post_generate_initial_change_stream_partitions` will be passed to + `post_generate_initial_change_stream_partitions_with_metadata`. + """ + return response, metadata + def pre_mutate_row( - self, request: bigtable.MutateRowRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.MutateRowRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for mutate_row Override in a subclass to manipulate the request or metadata @@ -207,15 +338,40 @@ def post_mutate_row( ) -> bigtable.MutateRowResponse: """Post-rpc interceptor for mutate_row - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_mutate_row_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_mutate_row` interceptor runs + before the `post_mutate_row_with_metadata` interceptor. """ return response + def post_mutate_row_with_metadata( + self, + response: bigtable.MutateRowResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.MutateRowResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for mutate_row + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_mutate_row_with_metadata` + interceptor in new development instead of the `post_mutate_row` interceptor. + When both interceptors are used, this `post_mutate_row_with_metadata` interceptor runs after the + `post_mutate_row` interceptor. The (possibly modified) response returned by + `post_mutate_row` will be passed to + `post_mutate_row_with_metadata`. + """ + return response, metadata + def pre_mutate_rows( - self, request: bigtable.MutateRowsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.MutateRowsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for mutate_rows Override in a subclass to manipulate the request or metadata @@ -228,15 +384,42 @@ def post_mutate_rows( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for mutate_rows - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_mutate_rows_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_mutate_rows` interceptor runs + before the `post_mutate_rows_with_metadata` interceptor. 
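+
+        A minimal sketch of the dispatch order (it mirrors the wiring the
+        transport performs later in this file; only the surrounding variable
+        names are illustrative):
+
+        .. code-block:: python
+
+            resp = interceptor.post_mutate_rows(resp)
+            resp, _ = interceptor.post_mutate_rows_with_metadata(
+                resp, response_metadata
+            )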
""" return response + def post_mutate_rows_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for mutate_rows + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_mutate_rows_with_metadata` + interceptor in new development instead of the `post_mutate_rows` interceptor. + When both interceptors are used, this `post_mutate_rows_with_metadata` interceptor runs after the + `post_mutate_rows` interceptor. The (possibly modified) response returned by + `post_mutate_rows` will be passed to + `post_mutate_rows_with_metadata`. + """ + return response, metadata + def pre_ping_and_warm( - self, request: bigtable.PingAndWarmRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.PingAndWarmRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for ping_and_warm Override in a subclass to manipulate the request or metadata @@ -249,17 +432,88 @@ def post_ping_and_warm( ) -> bigtable.PingAndWarmResponse: """Post-rpc interceptor for ping_and_warm - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_ping_and_warm_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. This `post_ping_and_warm` interceptor runs + before the `post_ping_and_warm_with_metadata` interceptor. + """ + return response + + def post_ping_and_warm_with_metadata( + self, + response: bigtable.PingAndWarmResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PingAndWarmResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for ping_and_warm + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_ping_and_warm_with_metadata` + interceptor in new development instead of the `post_ping_and_warm` interceptor. + When both interceptors are used, this `post_ping_and_warm_with_metadata` interceptor runs after the + `post_ping_and_warm` interceptor. The (possibly modified) response returned by + `post_ping_and_warm` will be passed to + `post_ping_and_warm_with_metadata`. + """ + return response, metadata + + def pre_prepare_query( + self, + request: bigtable.PrepareQueryRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PrepareQueryRequest, Sequence[Tuple[str, Union[str, bytes]]]]: + """Pre-rpc interceptor for prepare_query + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_prepare_query( + self, response: bigtable.PrepareQueryResponse + ) -> bigtable.PrepareQueryResponse: + """Post-rpc interceptor for prepare_query + + DEPRECATED. Please use the `post_prepare_query_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_prepare_query` interceptor runs + before the `post_prepare_query_with_metadata` interceptor. """ return response + def post_prepare_query_with_metadata( + self, + response: bigtable.PrepareQueryResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PrepareQueryResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for prepare_query + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_prepare_query_with_metadata` + interceptor in new development instead of the `post_prepare_query` interceptor. + When both interceptors are used, this `post_prepare_query_with_metadata` interceptor runs after the + `post_prepare_query` interceptor. The (possibly modified) response returned by + `post_prepare_query` will be passed to + `post_prepare_query_with_metadata`. + """ + return response, metadata + def pre_read_change_stream( self, request: bigtable.ReadChangeStreamRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for read_change_stream Override in a subclass to manipulate the request or metadata @@ -272,17 +526,44 @@ def post_read_change_stream( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for read_change_stream - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_change_stream_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_read_change_stream` interceptor runs + before the `post_read_change_stream_with_metadata` interceptor. """ return response + def post_read_change_stream_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for read_change_stream + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_read_change_stream_with_metadata` + interceptor in new development instead of the `post_read_change_stream` interceptor. + When both interceptors are used, this `post_read_change_stream_with_metadata` interceptor runs after the + `post_read_change_stream` interceptor. The (possibly modified) response returned by + `post_read_change_stream` will be passed to + `post_read_change_stream_with_metadata`. 
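+
+        For example (a hypothetical subclass, shown only to illustrate the
+        signature; ``metadata`` here carries the HTTP response headers that
+        the transport collects from the server response):
+
+        .. code-block:: python
+
+            class HeaderLoggingInterceptor(BigtableRestInterceptor):
+                def post_read_change_stream_with_metadata(self, response, metadata):
+                    for key, value in metadata:
+                        print(key, value)
+                    return response, metadata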
+ """ + return response, metadata + def pre_read_modify_write_row( self, request: bigtable.ReadModifyWriteRowRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for read_modify_write_row Override in a subclass to manipulate the request or metadata @@ -295,15 +576,42 @@ def post_read_modify_write_row( ) -> bigtable.ReadModifyWriteRowResponse: """Post-rpc interceptor for read_modify_write_row - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_modify_write_row_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_read_modify_write_row` interceptor runs + before the `post_read_modify_write_row_with_metadata` interceptor. """ return response + def post_read_modify_write_row_with_metadata( + self, + response: bigtable.ReadModifyWriteRowResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.ReadModifyWriteRowResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for read_modify_write_row + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_read_modify_write_row_with_metadata` + interceptor in new development instead of the `post_read_modify_write_row` interceptor. + When both interceptors are used, this `post_read_modify_write_row_with_metadata` interceptor runs after the + `post_read_modify_write_row` interceptor. The (possibly modified) response returned by + `post_read_modify_write_row` will be passed to + `post_read_modify_write_row_with_metadata`. + """ + return response, metadata + def pre_read_rows( - self, request: bigtable.ReadRowsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.ReadRowsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for read_rows Override in a subclass to manipulate the request or metadata @@ -316,17 +624,42 @@ def post_read_rows( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for read_rows - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_rows_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_read_rows` interceptor runs + before the `post_read_rows_with_metadata` interceptor. """ return response + def post_read_rows_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for read_rows + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. 
+ + We recommend only using this `post_read_rows_with_metadata` + interceptor in new development instead of the `post_read_rows` interceptor. + When both interceptors are used, this `post_read_rows_with_metadata` interceptor runs after the + `post_read_rows` interceptor. The (possibly modified) response returned by + `post_read_rows` will be passed to + `post_read_rows_with_metadata`. + """ + return response, metadata + def pre_sample_row_keys( self, request: bigtable.SampleRowKeysRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for sample_row_keys Override in a subclass to manipulate the request or metadata @@ -339,12 +672,37 @@ def post_sample_row_keys( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for sample_row_keys - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_sample_row_keys_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_sample_row_keys` interceptor runs + before the `post_sample_row_keys_with_metadata` interceptor. """ return response + def post_sample_row_keys_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for sample_row_keys + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_sample_row_keys_with_metadata` + interceptor in new development instead of the `post_sample_row_keys` interceptor. + When both interceptors are used, this `post_sample_row_keys_with_metadata` interceptor runs after the + `post_sample_row_keys` interceptor. The (possibly modified) response returned by + `post_sample_row_keys` will be passed to + `post_sample_row_keys_with_metadata`. + """ + return response, metadata + @dataclasses.dataclass class BigtableRestStub: @@ -353,8 +711,8 @@ class BigtableRestStub: _interceptor: BigtableRestInterceptor -class BigtableRestTransport(BigtableTransport): - """REST backend transport for Bigtable. +class BigtableRestTransport(_BaseBigtableRestTransport): + """REST backend synchronous transport for Bigtable. Service for reading from and writing to existing Bigtable tables. @@ -364,7 +722,6 @@ class BigtableRestTransport(BigtableTransport): and call it. It sends JSON representations of protocol buffers over HTTP/1.1 - """ def __init__( @@ -386,16 +743,17 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. 
- This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -418,21 +776,12 @@ def __init__( # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError( - f"Unexpected hostname structure: {host}" - ) # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - super().__init__( host=host, credentials=credentials, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, api_audience=api_audience, ) self._session = AuthorizedSession( @@ -443,19 +792,34 @@ def __init__( self._interceptor = interceptor or BigtableRestInterceptor() self._prep_wrapped_messages(client_info) - class _CheckAndMutateRow(BigtableRestStub): + class _CheckAndMutateRow( + _BaseBigtableRestTransport._BaseCheckAndMutateRow, BigtableRestStub + ): def __hash__(self): - return hash("CheckAndMutateRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.CheckAndMutateRow") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -463,7 +827,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Call the check and mutate row method over HTTP. @@ -474,8 +838,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
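+
+            For example, metadata could be passed as follows (illustrative
+            values; note the ``bytes`` payload for the ``-bin`` key):
+
+            .. code-block:: python
+
+                metadata = (
+                    ("x-goog-request-params", "table_name=..."),
+                    ("my-header-bin", b"\x00\x01"),
+                )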
Returns: ~.bigtable.CheckAndMutateRowResponse: @@ -484,50 +850,62 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_http_options() + ) + request, metadata = self._interceptor.pre_check_and_mutate_row( request, metadata ) - pb_request = bigtable.CheckAndMutateRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.CheckAndMutateRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "CheckAndMutateRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._CheckAndMutateRow._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -540,22 +918,217 @@ def __call__( pb_resp = bigtable.CheckAndMutateRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_and_mutate_row(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_check_and_mutate_row_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.CheckAndMutateRowResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for 
google.bigtable_v2.BigtableClient.check_and_mutate_row", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "CheckAndMutateRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _GenerateInitialChangeStreamPartitions(BigtableRestStub): + class _ExecuteQuery(_BaseBigtableRestTransport._BaseExecuteQuery, BigtableRestStub): def __hash__(self): - return hash("GenerateInitialChangeStreamPartitions") + return hash("BigtableRestTransport.ExecuteQuery") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + def __call__( + self, + request: bigtable.ExecuteQueryRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the execute query method over HTTP. + + Args: + request (~.bigtable.ExecuteQueryRequest): + The request object. Request message for + Bigtable.ExecuteQuery + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
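+
+            Note that this method returns a
+            ``rest_streaming.ResponseIterator``; results are consumed by
+            iterating over it, e.g. (illustrative):
+
+            .. code-block:: python
+
+                for item in resp:
+                    ...  # each item is a bigtable.ExecuteQueryResponse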
+ + Returns: + ~.bigtable.ExecuteQueryResponse: + Response message for + Bigtable.ExecuteQuery + + """ + + http_options = ( + _BaseBigtableRestTransport._BaseExecuteQuery._get_http_options() + ) + + request, metadata = self._interceptor.pre_execute_query(request, metadata) + transcoded_request = ( + _BaseBigtableRestTransport._BaseExecuteQuery._get_transcoded_request( + http_options, request + ) + ) + + body = _BaseBigtableRestTransport._BaseExecuteQuery._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = ( + _BaseBigtableRestTransport._BaseExecuteQuery._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ExecuteQuery", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ExecuteQuery", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableRestTransport._ExecuteQuery._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator( + response, bigtable.ExecuteQueryResponse + ) + + resp = self._interceptor.post_execute_query(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_execute_query_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.execute_query", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ExecuteQuery", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + class _GenerateInitialChangeStreamPartitions( + _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions, + BigtableRestStub, + ): + def __hash__(self): + return hash("BigtableRestTransport.GenerateInitialChangeStreamPartitions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -563,7 +1136,7 @@ def 
__call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the generate initial change stream partitions method over HTTP. @@ -577,8 +1150,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: @@ -589,55 +1164,65 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_http_options() + ) + ( request, metadata, ) = self._interceptor.pre_generate_initial_change_stream_partitions( request, metadata ) - pb_request = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( - request + transcoded_request = _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_transcoded_request( + http_options, request ) - transcoded_request = path_template.transcode(http_options, pb_request) - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.GenerateInitialChangeStreamPartitions", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "GenerateInitialChangeStreamPartitions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - 
params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._GenerateInitialChangeStreamPartitions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -649,24 +1234,61 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.GenerateInitialChangeStreamPartitionsResponse ) + resp = self._interceptor.post_generate_initial_change_stream_partitions( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_generate_initial_change_stream_partitions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.generate_initial_change_stream_partitions", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "GenerateInitialChangeStreamPartitions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _MutateRow(BigtableRestStub): + class _MutateRow(_BaseBigtableRestTransport._BaseMutateRow, BigtableRestStub): def __hash__(self): - return hash("MutateRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.MutateRow") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -674,7 +1296,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Call the mutate row method over HTTP. @@ -685,8 +1307,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.MutateRowResponse: @@ -695,48 +1319,62 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_mutate_row(request, metadata) - pb_request = bigtable.MutateRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = _BaseBigtableRestTransport._BaseMutateRow._get_http_options() - # Jsonify the request body + request, metadata = self._interceptor.pre_mutate_row(request, metadata) + transcoded_request = ( + _BaseBigtableRestTransport._BaseMutateRow._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableRestTransport._BaseMutateRow._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseMutateRow._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.MutateRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._MutateRow._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -749,22 +1387,62 @@ def __call__( pb_resp = bigtable.MutateRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_mutate_row(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_mutate_row_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.MutateRowResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.mutate_row", + extra={ + 
"serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _MutateRows(BigtableRestStub): + class _MutateRows(_BaseBigtableRestTransport._BaseMutateRows, BigtableRestStub): def __hash__(self): - return hash("MutateRows") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.MutateRows") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -772,7 +1450,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the mutate rows method over HTTP. @@ -783,8 +1461,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.MutateRowsResponse: @@ -793,48 +1473,64 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_mutate_rows(request, metadata) - pb_request = bigtable.MutateRowsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableRestTransport._BaseMutateRows._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_mutate_rows(request, metadata) + transcoded_request = ( + _BaseBigtableRestTransport._BaseMutateRows._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableRestTransport._BaseMutateRows._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseMutateRows._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.MutateRows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRows", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._MutateRows._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -846,22 +1542,56 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.MutateRowsResponse ) + resp = self._interceptor.post_mutate_rows(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_mutate_rows_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.mutate_rows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRows", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) 
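+            # NOTE: resp wraps the raw HTTP stream; MutateRowsResponse
+            # messages are decoded lazily as the caller iterates, so the
+            # streamed body is not parsed or validated at this point.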
return resp - class _PingAndWarm(BigtableRestStub): + class _PingAndWarm(_BaseBigtableRestTransport._BasePingAndWarm, BigtableRestStub): def __hash__(self): - return hash("PingAndWarm") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.PingAndWarm") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -869,7 +1599,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Call the ping and warm method over HTTP. @@ -880,8 +1610,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.PingAndWarmResponse: @@ -891,49 +1623,220 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*}:ping", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_ping_and_warm(request, metadata) - pb_request = bigtable.PingAndWarmRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableRestTransport._BasePingAndWarm._get_http_options() + ) - # Jsonify the request body + request, metadata = self._interceptor.pre_ping_and_warm(request, metadata) + transcoded_request = ( + _BaseBigtableRestTransport._BasePingAndWarm._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableRestTransport._BasePingAndWarm._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BasePingAndWarm._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.PingAndWarm", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PingAndWarm", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request + response = BigtableRestTransport._PingAndWarm._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
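+            # (for example, a 404 is raised as
+            # google.api_core.exceptions.NotFound and a 503 as
+            # ServiceUnavailable)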
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.PingAndWarmResponse() + pb_resp = bigtable.PingAndWarmResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_ping_and_warm(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_ping_and_warm_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.PingAndWarmResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.ping_and_warm", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PingAndWarm", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _PrepareQuery(_BaseBigtableRestTransport._BasePrepareQuery, BigtableRestStub): + def __hash__(self): + return hash("BigtableRestTransport.PrepareQuery") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] headers = dict(metadata) headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), data=body, ) + return response + + def __call__( + self, + request: bigtable.PrepareQueryRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable.PrepareQueryResponse: + r"""Call the prepare query method over HTTP. + + Args: + request (~.bigtable.PrepareQueryRequest): + The request object. Request message for + Bigtable.PrepareQuery + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
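+
+            A likely usage pattern (an assumption based on the message names
+            in this module, not something this transport mandates): the
+            prepared-query handle from the response is passed to a later
+            ``ExecuteQueryRequest``, e.g.:
+
+            .. code-block:: python
+
+                execute_request = bigtable.ExecuteQueryRequest(
+                    prepared_query=prepare_query_response.prepared_query,
+                )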
+ + Returns: + ~.bigtable.PrepareQueryResponse: + Response message for + Bigtable.PrepareQueryResponse + + """ + + http_options = ( + _BaseBigtableRestTransport._BasePrepareQuery._get_http_options() + ) + + request, metadata = self._interceptor.pre_prepare_query(request, metadata) + transcoded_request = ( + _BaseBigtableRestTransport._BasePrepareQuery._get_transcoded_request( + http_options, request + ) + ) + + body = _BaseBigtableRestTransport._BasePrepareQuery._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = ( + _BaseBigtableRestTransport._BasePrepareQuery._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.PrepareQuery", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PrepareQuery", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableRestTransport._PrepareQuery._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. @@ -941,26 +1844,68 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = bigtable.PingAndWarmResponse() - pb_resp = bigtable.PingAndWarmResponse.pb(resp) + resp = bigtable.PrepareQueryResponse() + pb_resp = bigtable.PrepareQueryResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_ping_and_warm(resp) + + resp = self._interceptor.post_prepare_query(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_prepare_query_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.PrepareQueryResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.prepare_query", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PrepareQuery", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ReadChangeStream(BigtableRestStub): + class _ReadChangeStream( + _BaseBigtableRestTransport._BaseReadChangeStream, BigtableRestStub + ): def __hash__(self): - return hash("ReadChangeStream") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.ReadChangeStream") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = 
transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -968,7 +1913,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the read change stream method over HTTP. @@ -980,8 +1925,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.bigtable.ReadChangeStreamResponse: @@ -991,50 +1938,66 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseReadChangeStream._get_http_options() + ) + request, metadata = self._interceptor.pre_read_change_stream( request, metadata ) - pb_request = bigtable.ReadChangeStreamRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableRestTransport._BaseReadChangeStream._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = ( + _BaseBigtableRestTransport._BaseReadChangeStream._get_request_body_json( + transcoded_request + ) ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseReadChangeStream._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadChangeStream", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadChangeStream", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = 
"application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._ReadChangeStream._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1046,22 +2009,58 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.ReadChangeStreamResponse ) + resp = self._interceptor.post_read_change_stream(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_change_stream_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.read_change_stream", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadChangeStream", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ReadModifyWriteRow(BigtableRestStub): + class _ReadModifyWriteRow( + _BaseBigtableRestTransport._BaseReadModifyWriteRow, BigtableRestStub + ): def __hash__(self): - return hash("ReadModifyWriteRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.ReadModifyWriteRow") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1069,7 +2068,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Call the read modify write row method over HTTP. @@ -1080,8 +2079,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.ReadModifyWriteRowResponse: @@ -1090,50 +2091,62 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_http_options() + ) + request, metadata = self._interceptor.pre_read_modify_write_row( request, metadata ) - pb_request = bigtable.ReadModifyWriteRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, - ) + query_params = _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadModifyWriteRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadModifyWriteRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._ReadModifyWriteRow._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1146,22 +2159,64 @@ def __call__( pb_resp = bigtable.ReadModifyWriteRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_read_modify_write_row(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_modify_write_row_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.ReadModifyWriteRowResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + 
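# Structured record: the ``extra`` mapping carries the HTTP status,
+                    # headers, and (when serializable) the decoded response payload.
+                    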
"Received response for google.bigtable_v2.BigtableClient.read_modify_write_row", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadModifyWriteRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _ReadRows(BigtableRestStub): + class _ReadRows(_BaseBigtableRestTransport._BaseReadRows, BigtableRestStub): def __hash__(self): - return hash("ReadRows") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.ReadRows") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -1169,7 +2224,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the read rows method over HTTP. @@ -1180,8 +2235,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.ReadRowsResponse: @@ -1190,48 +2247,62 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readRows", - "body": "*", - }, - ] - request, metadata = self._interceptor.pre_read_rows(request, metadata) - pb_request = bigtable.ReadRowsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = _BaseBigtableRestTransport._BaseReadRows._get_http_options() - # Jsonify the request body + request, metadata = self._interceptor.pre_read_rows(request, metadata) + transcoded_request = ( + _BaseBigtableRestTransport._BaseReadRows._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + body = _BaseBigtableRestTransport._BaseReadRows._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseReadRows._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadRows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadRows", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._ReadRows._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1241,22 +2312,58 @@ def __call__( # Return the response resp = rest_streaming.ResponseIterator(response, bigtable.ReadRowsResponse) + resp = self._interceptor.post_read_rows(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_rows_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.read_rows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadRows", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class 
_SampleRowKeys(BigtableRestStub): + class _SampleRowKeys( + _BaseBigtableRestTransport._BaseSampleRowKeys, BigtableRestStub + ): def __hash__(self): - return hash("SampleRowKeys") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.SampleRowKeys") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + stream=True, + ) + return response def __call__( self, @@ -1264,7 +2371,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the sample row keys method over HTTP. @@ -1275,8 +2382,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.SampleRowKeysResponse: @@ -1285,39 +2394,59 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys", - }, - ] - request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) - pb_request = bigtable.SampleRowKeysRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) + http_options = ( + _BaseBigtableRestTransport._BaseSampleRowKeys._get_http_options() + ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] + request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) + transcoded_request = ( + _BaseBigtableRestTransport._BaseSampleRowKeys._get_transcoded_request( + http_options, request + ) + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - including_default_value_fields=False, - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseSampleRowKeys._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - query_params["$alt"] = "json;enum-encoding=int" + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.SampleRowKeys", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "SampleRowKeys", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableRestTransport._SampleRowKeys._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1329,7 +2458,28 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.SampleRowKeysResponse ) + resp = self._interceptor.post_sample_row_keys(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_sample_row_keys_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.sample_row_keys", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "SampleRowKeys", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property @@ -1342,6 +2492,14 @@ def check_and_mutate_row( # In C++ this would require a dynamic_cast return self._CheckAndMutateRow(self._session, self._host, self._interceptor) # type: ignore + @property + def execute_query( + self, + ) 
-> Callable[[bigtable.ExecuteQueryRequest], bigtable.ExecuteQueryResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ExecuteQuery(self._session, self._host, self._interceptor) # type: ignore + @property def generate_initial_change_stream_partitions( self, @@ -1377,6 +2535,14 @@ def ping_and_warm( # In C++ this would require a dynamic_cast return self._PingAndWarm(self._session, self._host, self._interceptor) # type: ignore + @property + def prepare_query( + self, + ) -> Callable[[bigtable.PrepareQueryRequest], bigtable.PrepareQueryResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._PrepareQuery(self._session, self._host, self._interceptor) # type: ignore + @property def read_change_stream( self, diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py new file mode 100644 index 000000000..5eab0ded4 --- /dev/null +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py @@ -0,0 +1,720 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from .base import BigtableTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.bigtable_v2.types import bigtable + + +class _BaseBigtableRestTransport(BigtableTransport): + """Base REST backend transport for Bigtable. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "bigtable.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + Args: + host (Optional[str]): + The hostname to connect to (default: 'bigtable.googleapis.com'). + credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+
+    class _BaseCheckAndMutateRow:
+        def __hash__(self):  # pragma: NO COVER
+            return NotImplementedError("__hash__ must be implemented.")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        @staticmethod
+        def _get_http_options():
+            http_options: List[Dict[str, str]] = [
+                {
+                    "method": "post",
+                    "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow",
+                    "body": "*",
+                },
+                {
+                    "method": "post",
+                    "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow",
+                    "body": "*",
+                },
+            ]
+            return http_options
+
+        @staticmethod
+        def _get_transcoded_request(http_options, request):
+            pb_request = bigtable.CheckAndMutateRowRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+            return transcoded_request
+
+        @staticmethod
+        def _get_request_body_json(transcoded_request):
+            # Jsonify the request body
+
+            body = json_format.MessageToJson(
+                transcoded_request["body"], use_integers_for_enums=True
+            )
+            return body
+
+        @staticmethod
+        def _get_query_params_json(transcoded_request):
+            query_params = json.loads(
+                json_format.MessageToJson(
+                    transcoded_request["query_params"],
+                    use_integers_for_enums=True,
+                )
+            )
+            query_params.update(
+                _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_unset_required_fields(
+                    query_params
+                )
+            )
+
+            query_params["$alt"] = "json;enum-encoding=int"
+            return query_params
+
+    class _BaseExecuteQuery:
+        def __hash__(self):  # pragma: NO COVER
+            return NotImplementedError("__hash__ must be implemented.")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        @staticmethod
+        def _get_http_options():
+            http_options: List[Dict[str, str]] = [
+                {
+                    "method": "post",
+                    "uri": "/v2/{instance_name=projects/*/instances/*}:executeQuery",
+                    "body": "*",
+                },
+            ]
+            return http_options
+
+        @staticmethod
+        def _get_transcoded_request(http_options, request):
+            pb_request = bigtable.ExecuteQueryRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+            return transcoded_request
+
+        @staticmethod
+        def _get_request_body_json(transcoded_request):
+            # Jsonify the request body
+
+            body = json_format.MessageToJson(
+                transcoded_request["body"], use_integers_for_enums=True
+            )
+            return body
+
+        @staticmethod
+        def 
_get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseExecuteQuery._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGenerateInitialChangeStreamPartitions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseMutateRow: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.MutateRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseMutateRow._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseMutateRows: + def __hash__(self): # pragma: NO COVER + 
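# Note: generated code returns (rather than raises) NotImplementedError
+            # here; these base stubs are not meant to be hashed directly.
+            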
return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.MutateRowsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseMutateRows._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BasePingAndWarm: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*}:ping", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.PingAndWarmRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BasePingAndWarm._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BasePrepareQuery: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{instance_name=projects/*/instances/*}:prepareQuery", + "body": "*", + }, + ] + return http_options + + @staticmethod + def 
_get_transcoded_request(http_options, request): + pb_request = bigtable.PrepareQueryRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BasePrepareQuery._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseReadChangeStream: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.ReadChangeStreamRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseReadChangeStream._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseReadModifyWriteRow: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.ReadModifyWriteRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + 
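# Decode the transcoded query params from protobuf JSON, then backfill
+            # defaults for any required fields the request left unset.
+            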
query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseReadRows: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readRows", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{materialized_view_name=projects/*/instances/*/materializedViews/*}:readRows", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.ReadRowsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseSampleRowKeys: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys", + }, + { + "method": "get", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys", + }, + { + "method": "get", + "uri": "/v2/{materialized_view_name=projects/*/instances/*/materializedViews/*}:sampleRowKeys", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.SampleRowKeysRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + +__all__ = ("_BaseBigtableRestTransport",) diff --git a/google/cloud/bigtable_v2/types/__init__.py b/google/cloud/bigtable_v2/types/__init__.py index f266becb9..b13c076a2 100644 --- a/google/cloud/bigtable_v2/types/__init__.py +++ b/google/cloud/bigtable_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
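The hunks that follow export the new SQL query surface (``ExecuteQueryRequest``/``ExecuteQueryResponse`` and ``PrepareQueryRequest``/``PrepareQueryResponse``). A minimal sketch of how the two RPCs compose at the ``bigtable_v2`` GAPIC layer; the project, instance, table, and query string below are illustrative placeholders, and the parameter wiring follows the request docstrings added later in this diff:

```python
from google.cloud.bigtable_v2 import BigtableClient
from google.cloud.bigtable_v2 import types

client = BigtableClient()  # assumes ambient credentials

instance = "projects/my-project/instances/my-instance"  # placeholder

# Prepare once: the server returns an opaque plan plus result metadata.
prepared = client.prepare_query(
    types.PrepareQueryRequest(
        instance_name=instance,
        query="SELECT _key FROM `my-table` WHERE _key = @row_key",
        param_types={"row_key": types.Type(bytes_type=types.Type.Bytes())},
    )
)

# Execute the plan, binding @row_key; leaving Value.type empty lets the
# server infer it from the param_types supplied at prepare time.
for response in client.execute_query(
    types.ExecuteQueryRequest(
        instance_name=instance,
        prepared_query=prepared.prepared_query,
        params={"row_key": types.Value(bytes_value=b"row-1")},
    )
):
    # Each streamed message carries either ResultSetMetadata or a
    # PartialResultSet batch of encoded rows.
    print(response)
```

Per ``PrepareQueryResponse.valid_until``, the ``prepared_query`` blob is opaque and eventually expires, so long-lived callers would re-prepare asynchronously rather than cache it indefinitely.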
@@ -16,6 +16,8 @@ from .bigtable import ( CheckAndMutateRowRequest, CheckAndMutateRowResponse, + ExecuteQueryRequest, + ExecuteQueryResponse, GenerateInitialChangeStreamPartitionsRequest, GenerateInitialChangeStreamPartitionsResponse, MutateRowRequest, @@ -24,6 +26,8 @@ MutateRowsResponse, PingAndWarmRequest, PingAndWarmResponse, + PrepareQueryRequest, + PrepareQueryResponse, RateLimitInfo, ReadChangeStreamRequest, ReadChangeStreamResponse, @@ -35,12 +39,21 @@ SampleRowKeysResponse, ) from .data import ( + ArrayValue, Cell, Column, + ColumnMetadata, ColumnRange, Family, + Idempotency, Mutation, + PartialResultSet, + ProtoFormat, + ProtoRows, + ProtoRowsBatch, + ProtoSchema, ReadModifyWriteRule, + ResultSetMetadata, Row, RowFilter, RowRange, @@ -49,11 +62,15 @@ StreamContinuationTokens, StreamPartition, TimestampRange, + Value, ValueRange, ) from .feature_flags import ( FeatureFlags, ) +from .peer_info import ( + PeerInfo, +) from .request_stats import ( FullReadStatsView, ReadIterationStats, @@ -63,10 +80,15 @@ from .response_params import ( ResponseParams, ) +from .types import ( + Type, +) __all__ = ( "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", + "ExecuteQueryRequest", + "ExecuteQueryResponse", "GenerateInitialChangeStreamPartitionsRequest", "GenerateInitialChangeStreamPartitionsResponse", "MutateRowRequest", @@ -75,6 +97,8 @@ "MutateRowsResponse", "PingAndWarmRequest", "PingAndWarmResponse", + "PrepareQueryRequest", + "PrepareQueryResponse", "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", @@ -84,12 +108,21 @@ "ReadRowsResponse", "SampleRowKeysRequest", "SampleRowKeysResponse", + "ArrayValue", "Cell", "Column", + "ColumnMetadata", "ColumnRange", "Family", + "Idempotency", "Mutation", + "PartialResultSet", + "ProtoFormat", + "ProtoRows", + "ProtoRowsBatch", + "ProtoSchema", "ReadModifyWriteRule", + "ResultSetMetadata", "Row", "RowFilter", "RowRange", @@ -98,11 +131,14 @@ "StreamContinuationTokens", "StreamPartition", "TimestampRange", + "Value", "ValueRange", "FeatureFlags", + "PeerInfo", "FullReadStatsView", "ReadIterationStats", "RequestLatencyStats", "RequestStats", "ResponseParams", + "Type", ) diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py index 57f806408..19abba67b 100644 --- a/google/cloud/bigtable_v2/types/bigtable.py +++ b/google/cloud/bigtable_v2/types/bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,6 +21,7 @@ from google.cloud.bigtable_v2.types import data from google.cloud.bigtable_v2.types import request_stats as gb_request_stats +from google.cloud.bigtable_v2.types import types from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.protobuf import wrappers_pb2 # type: ignore @@ -49,6 +50,10 @@ "GenerateInitialChangeStreamPartitionsResponse", "ReadChangeStreamRequest", "ReadChangeStreamResponse", + "ExecuteQueryRequest", + "ExecuteQueryResponse", + "PrepareQueryRequest", + "PrepareQueryResponse", }, ) @@ -58,9 +63,22 @@ class ReadRowsRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table from which to read. + Optional. The unique name of the table from which to read. + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView from which + to read. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. + materialized_view_name (str): + Optional. The unique name of the MaterializedView from which + to read. + + Values are of the form + ``projects//instances//materializedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -123,6 +141,14 @@ class RequestStatsView(proto.Enum): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=9, + ) + materialized_view_name: str = proto.Field( + proto.STRING, + number=11, + ) app_profile_id: str = proto.Field( proto.STRING, number=5, @@ -171,27 +197,12 @@ class ReadRowsResponse(proto.Message): row key, allowing the client to skip that work on a retry. request_stats (google.cloud.bigtable_v2.types.RequestStats): - If requested, provide enhanced query performance statistics. - The semantics dictate: - - - request_stats is empty on every (streamed) response, - except - - request_stats has non-empty information after all chunks - have been streamed, where the ReadRowsResponse message - only contains request_stats. - - - For example, if a read request would have returned an - empty response instead a single ReadRowsResponse is - streamed with empty chunks and request_stats filled. - - Visually, response messages will stream as follows: ... -> - {chunks: [...]} -> {chunks: [], request_stats: {...}} - \_\ **/ \_**\ \__________/ Primary response Trailer of - RequestStats info - - Or if the read did not return any values: {chunks: [], - request_stats: {...}} \________________________________/ - Trailer of RequestStats info + If requested, return enhanced query performance statistics. + The field request_stats is empty in a streamed response + unless the ReadRowsResponse message contains request_stats + in the last message of the stream. Always returned when + requested, even when the read request returns an empty + response. """ class CellChunk(proto.Message): @@ -327,9 +338,23 @@ class SampleRowKeysRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table from which to sample - row keys. Values are of the form + Optional. The unique name of the table from which to sample + row keys. + + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView from which + to sample row keys. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. + materialized_view_name (str): + Optional. The unique name of the MaterializedView from which + to read. + + Values are of the form + ``projects//instances//materializedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -340,6 +365,14 @@ class SampleRowKeysRequest(proto.Message): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=4, + ) + materialized_view_name: str = proto.Field( + proto.STRING, + number=5, + ) app_profile_id: str = proto.Field( proto.STRING, number=2, @@ -385,9 +418,17 @@ class MutateRowRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table to which the mutation - should be applied. Values are of the form + Optional. The unique name of the table to which the mutation + should be applied. + + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView to which the + mutation should be applied. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -401,12 +442,20 @@ class MutateRowRequest(proto.Message): meaning that earlier mutations can be masked by later ones. Must contain at least one entry and at most 100000. + idempotency (google.cloud.bigtable_v2.types.Idempotency): + If set consistently across retries, prevents + this mutation from being double applied to + aggregate column families within a 15m window. """ table_name: str = proto.Field( proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=6, + ) app_profile_id: str = proto.Field( proto.STRING, number=4, @@ -420,6 +469,11 @@ class MutateRowRequest(proto.Message): number=3, message=data.Mutation, ) + idempotency: data.Idempotency = proto.Field( + proto.MESSAGE, + number=8, + message=data.Idempotency, + ) class MutateRowResponse(proto.Message): @@ -431,8 +485,17 @@ class MutateRowsRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table to - which the mutations should be applied. + Optional. The unique name of the table to which the + mutations should be applied. + + Values are of the form + ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView to which the + mutations should be applied. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -460,6 +523,10 @@ class Entry(proto.Message): order, meaning that earlier mutations can be masked by later ones. You must specify at least one mutation. + idempotency (google.cloud.bigtable_v2.types.Idempotency): + If set consistently across retries, prevents + this mutation from being double applied to + aggregate column families within a 15m window. """ row_key: bytes = proto.Field( @@ -471,11 +538,20 @@ class Entry(proto.Message): number=2, message=data.Mutation, ) + idempotency: data.Idempotency = proto.Field( + proto.MESSAGE, + number=3, + message=data.Idempotency, + ) table_name: str = proto.Field( proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=5, + ) app_profile_id: str = proto.Field( proto.STRING, number=3, @@ -567,8 +643,8 @@ class RateLimitInfo(proto.Message): ``factor`` until another ``period`` has passed. The client can measure its load using any unit that's - comparable over time For example, QPS can be used as long as - each request involves a similar amount of work. + comparable over time. For example, QPS can be used as long + as each request involves a similar amount of work. """ period: duration_pb2.Duration = proto.Field( @@ -587,10 +663,17 @@ class CheckAndMutateRowRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of the - form + Optional. The unique name of the table to which the + conditional mutation should be applied. + + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView to which the + conditional mutation should be applied. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -624,6 +707,10 @@ class CheckAndMutateRowRequest(proto.Message): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=9, + ) app_profile_id: str = proto.Field( proto.STRING, number=7, @@ -700,10 +787,17 @@ class ReadModifyWriteRowRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the - form + Optional. The unique name of the table to which the + read/modify/write rules should be applied. + + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView to which the + read/modify/write rules should be applied. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -716,13 +810,19 @@ class ReadModifyWriteRowRequest(proto.Message): row's contents are to be transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of - later ones. + later ones. At least one entry must be + specified, and there can be at most 100000 + rules. """ table_name: str = proto.Field( proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=6, + ) app_profile_id: str = proto.Field( proto.STRING, number=4, @@ -840,10 +940,10 @@ class ReadChangeStreamRequest(proto.Message): the stream as part of ``Heartbeat`` and ``CloseStream`` messages. - If a single token is provided, the token’s partition must - exactly match the request’s partition. If multiple tokens + If a single token is provided, the token's partition must + exactly match the request's partition. If multiple tokens are provided, as in the case of a partition merge, the union - of the token partitions must exactly cover the request’s + of the token partitions must exactly cover the request's partition. Otherwise, INVALID_ARGUMENT will be returned. This field is a member of `oneof`_ ``start_from``. @@ -1024,7 +1124,7 @@ class DataChange(proto.Message): a record that will be delivered in the future on the stream. It is possible that, under particular circumstances that a future record - has a timestamp is is lower than a previously + has a timestamp that is lower than a previously seen timestamp. For an example usage see https://beam.apache.org/documentation/basics/#watermarks """ @@ -1108,7 +1208,7 @@ class Heartbeat(proto.Message): a record that will be delivered in the future on the stream. It is possible that, under particular circumstances that a future record - has a timestamp is is lower than a previously + has a timestamp that is lower than a previously seen timestamp. For an example usage see https://beam.apache.org/documentation/basics/#watermarks """ @@ -1131,12 +1231,25 @@ class CloseStream(proto.Message): if there was an ``end_time`` specified). If ``continuation_tokens`` & ``new_partitions`` are present, then a change in partitioning requires the client to open a new stream for each token to resume - reading. Example: [B, D) ends \| v new_partitions: [A, C) [C, E) - continuation_tokens.partitions: [B,C) [C,D) ^---^ ^---^ ^ ^ \| \| \| - StreamContinuationToken 2 \| StreamContinuationToken 1 To read the - new partition [A,C), supply the continuation tokens whose ranges - cover the new partition, for example ContinuationToken[A,B) & - ContinuationToken[B,C). + reading. Example: + + :: + + [B, D) ends + | + v + new_partitions: [A, C) [C, E) + continuation_tokens.partitions: [B,C) [C,D) + ^---^ ^---^ + ^ ^ + | | + | StreamContinuationToken 2 + | + StreamContinuationToken 1 + + To read the new partition [A,C), supply the continuation tokens + whose ranges cover the new partition, for example + ContinuationToken[A,B) & ContinuationToken[B,C). Attributes: status (google.rpc.status_pb2.Status): @@ -1188,4 +1301,249 @@ class CloseStream(proto.Message): ) +class ExecuteQueryRequest(proto.Message): + r"""Request message for Bigtable.ExecuteQuery + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + instance_name (str): + Required. The unique name of the instance against which the + query should be executed. 
Values are of the form + ``projects//instances/`` + app_profile_id (str): + Optional. This value specifies routing for replication. If + not specified, the ``default`` application profile will be + used. + query (str): + Required. The query string. + + Exactly one of ``query`` and ``prepared_query`` is required. + Setting both or neither is an ``INVALID_ARGUMENT``. + prepared_query (bytes): + A prepared query that was returned from + ``PrepareQueryResponse``. + + Exactly one of ``query`` and ``prepared_query`` is required. + Setting both or neither is an ``INVALID_ARGUMENT``. + + Setting this field also places restrictions on several other + fields: + + - ``data_format`` must be empty. + - ``validate_only`` must be false. + - ``params`` must match the ``param_types`` set in the + ``PrepareQueryRequest``. + proto_format (google.cloud.bigtable_v2.types.ProtoFormat): + Protocol buffer format as described by + ProtoSchema and ProtoRows messages. + + This field is a member of `oneof`_ ``data_format``. + resume_token (bytes): + Optional. If this request is resuming a previously + interrupted query execution, ``resume_token`` should be + copied from the last PartialResultSet yielded before the + interruption. Doing this enables the query execution to + resume where the last one left off. The rest of the request + parameters must exactly match the request that yielded this + token. Otherwise the request will fail. + params (MutableMapping[str, google.cloud.bigtable_v2.types.Value]): + Required. params contains string type keys and Bigtable type + values that bind to placeholders in the query string. In + query string, a parameter placeholder consists of the ``@`` + character followed by the parameter name (for example, + ``@firstName``) in the query string. + + For example, if + ``params["firstName"] = bytes_value: "foo" type {bytes_type {}}`` + then ``@firstName`` will be replaced with googlesql bytes + value "foo" in the query string during query evaluation. + + If ``Value.kind`` is not set, the value is treated as a NULL + value of the given type. For example, if + ``params["firstName"] = type {string_type {}}`` then + ``@firstName`` will be replaced with googlesql null string. + + If ``query`` is set, any empty ``Value.type`` in the map + will be rejected with ``INVALID_ARGUMENT``. + + If ``prepared_query`` is set, any empty ``Value.type`` in + the map will be inferred from the ``param_types`` in the + ``PrepareQueryRequest``. Any non-empty ``Value.type`` must + match the corresponding ``param_types`` entry, or be + rejected with ``INVALID_ARGUMENT``. + """ + + instance_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + query: str = proto.Field( + proto.STRING, + number=3, + ) + prepared_query: bytes = proto.Field( + proto.BYTES, + number=9, + ) + proto_format: data.ProtoFormat = proto.Field( + proto.MESSAGE, + number=4, + oneof="data_format", + message=data.ProtoFormat, + ) + resume_token: bytes = proto.Field( + proto.BYTES, + number=8, + ) + params: MutableMapping[str, data.Value] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=7, + message=data.Value, + ) + + +class ExecuteQueryResponse(proto.Message): + r"""Response message for Bigtable.ExecuteQuery + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + metadata (google.cloud.bigtable_v2.types.ResultSetMetadata): + Structure of rows in this response stream. + The first (and only the first) response streamed + from the server will be of this type. + + This field is a member of `oneof`_ ``response``. + results (google.cloud.bigtable_v2.types.PartialResultSet): + A partial result set with row data + potentially including additional instructions on + how recent past and future partial responses + should be interpreted. + + This field is a member of `oneof`_ ``response``. + """ + + metadata: data.ResultSetMetadata = proto.Field( + proto.MESSAGE, + number=1, + oneof="response", + message=data.ResultSetMetadata, + ) + results: data.PartialResultSet = proto.Field( + proto.MESSAGE, + number=2, + oneof="response", + message=data.PartialResultSet, + ) + + +class PrepareQueryRequest(proto.Message): + r"""Request message for Bigtable.PrepareQuery + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + instance_name (str): + Required. The unique name of the instance against which the + query should be executed. Values are of the form + ``projects//instances/`` + app_profile_id (str): + Optional. This value specifies routing for preparing the + query. Note that this ``app_profile_id`` is only used for + preparing the query. The actual query execution will use the + app profile specified in the ``ExecuteQueryRequest``. If not + specified, the ``default`` application profile will be used. + query (str): + Required. The query string. + proto_format (google.cloud.bigtable_v2.types.ProtoFormat): + Protocol buffer format as described by + ProtoSchema and ProtoRows messages. + + This field is a member of `oneof`_ ``data_format``. + param_types (MutableMapping[str, google.cloud.bigtable_v2.types.Type]): + Required. ``param_types`` is a map of parameter identifier + strings to their ``Type``\ s. + + In query string, a parameter placeholder consists of the + ``@`` character followed by the parameter name (for example, + ``@firstName``) in the query string. + + For example, if param_types["firstName"] = Bytes then + @firstName will be a query parameter of type Bytes. The + specific ``Value`` to be used for the query execution must + be sent in ``ExecuteQueryRequest`` in the ``params`` map. + """ + + instance_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + query: str = proto.Field( + proto.STRING, + number=3, + ) + proto_format: data.ProtoFormat = proto.Field( + proto.MESSAGE, + number=4, + oneof="data_format", + message=data.ProtoFormat, + ) + param_types: MutableMapping[str, types.Type] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=6, + message=types.Type, + ) + + +class PrepareQueryResponse(proto.Message): + r"""Response message for Bigtable.PrepareQueryResponse + + Attributes: + metadata (google.cloud.bigtable_v2.types.ResultSetMetadata): + Structure of rows in the response stream of + ``ExecuteQueryResponse`` for the returned + ``prepared_query``. + prepared_query (bytes): + A serialized prepared query. Clients should treat this as an + opaque blob of bytes to send in ``ExecuteQueryRequest``. + valid_until (google.protobuf.timestamp_pb2.Timestamp): + The time at which the prepared query token + becomes invalid. 
A token may become invalid + early due to changes in the data being read, but + it provides a guideline to refresh query plans + asynchronously. + """ + + metadata: data.ResultSetMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=data.ResultSetMetadata, + ) + prepared_query: bytes = proto.Field( + proto.BYTES, + number=2, + ) + valid_until: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py index e37644a76..12ac8b2b1 100644 --- a/google/cloud/bigtable_v2/types/data.py +++ b/google/cloud/bigtable_v2/types/data.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,6 +19,10 @@ import proto # type: ignore +from google.cloud.bigtable_v2.types import types +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import date_pb2 # type: ignore + __protobuf__ = proto.module( package="google.bigtable.v2", @@ -27,6 +31,8 @@ "Family", "Column", "Cell", + "Value", + "ArrayValue", "RowRange", "RowSet", "ColumnRange", @@ -38,6 +44,14 @@ "StreamPartition", "StreamContinuationTokens", "StreamContinuationToken", + "ProtoFormat", + "ColumnMetadata", + "ProtoSchema", + "ResultSetMetadata", + "ProtoRows", + "ProtoRowsBatch", + "PartialResultSet", + "Idempotency", }, ) @@ -164,6 +178,169 @@ class Cell(proto.Message): ) +class Value(proto.Message): + r"""``Value`` represents a dynamically typed value. The typed fields in + ``Value`` are used as a transport encoding for the actual value + (which may be of a more complex type). See the documentation of the + ``Type`` message for more details. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + type_ (google.cloud.bigtable_v2.types.Type): + The verified ``Type`` of this ``Value``, if it cannot be + inferred. + + Read results will never specify the encoding for ``type`` + since the value will already have been decoded by the + server. Furthermore, the ``type`` will be omitted entirely + if it can be inferred from a previous response. The exact + semantics for inferring ``type`` will vary, and are + therefore documented separately for each read method. + + When using composite types (Struct, Array, Map) only the + outermost ``Value`` will specify the ``type``. This + top-level ``type`` will define the types for any nested + ``Struct' fields,``\ Array\ ``elements, or``\ Map\ ``key/value pairs. If a nested``\ Value\ ``provides a``\ type\` + on write, the request will be rejected with + INVALID_ARGUMENT. + raw_value (bytes): + Represents a raw byte sequence with no type information. The + ``type`` field must be omitted. + + This field is a member of `oneof`_ ``kind``. + raw_timestamp_micros (int): + Represents a raw cell timestamp with no type information. + The ``type`` field must be omitted. + + This field is a member of `oneof`_ ``kind``. + bytes_value (bytes): + Represents a typed value transported as a + byte sequence. + + This field is a member of `oneof`_ ``kind``. 
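An aside before the remaining `kind` members: with proto-plus, a `Value` is built by setting exactly one oneof field, and assigning a different member clears the first. A minimal sketch, assuming these generated types are re-exported from `google.cloud.bigtable_v2.types` as the docstrings suggest:

```python
# Minimal sketch: constructing a typed Value via the `kind` oneof.
# Assumes a google-cloud-bigtable build that includes these generated types.
from google.cloud.bigtable_v2 import types

v = types.Value(string_value="foo")  # selects string_value in the `kind` oneof
v.int_value = 42                     # assigning another member clears string_value
assert types.Value.pb(v).WhichOneof("kind") == "int_value"
```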
+ string_value (str): + Represents a typed value transported as a + string. + + This field is a member of `oneof`_ ``kind``. + int_value (int): + Represents a typed value transported as an + integer. + + This field is a member of `oneof`_ ``kind``. + bool_value (bool): + Represents a typed value transported as a + boolean. + + This field is a member of `oneof`_ ``kind``. + float_value (float): + Represents a typed value transported as a + floating point number. Does not support NaN or + infinities. + + This field is a member of `oneof`_ ``kind``. + timestamp_value (google.protobuf.timestamp_pb2.Timestamp): + Represents a typed value transported as a + timestamp. + + This field is a member of `oneof`_ ``kind``. + date_value (google.type.date_pb2.Date): + Represents a typed value transported as a + date. + + This field is a member of `oneof`_ ``kind``. + array_value (google.cloud.bigtable_v2.types.ArrayValue): + Represents a typed value transported as a sequence of + values. To differentiate between ``Struct``, ``Array``, and + ``Map``, the outermost ``Value`` must provide an explicit + ``type`` on write. This ``type`` will apply recursively to + the nested ``Struct`` fields, ``Array`` elements, or ``Map`` + key/value pairs, which *must not* supply their own ``type``. + + This field is a member of `oneof`_ ``kind``. + """ + + type_: types.Type = proto.Field( + proto.MESSAGE, + number=7, + message=types.Type, + ) + raw_value: bytes = proto.Field( + proto.BYTES, + number=8, + oneof="kind", + ) + raw_timestamp_micros: int = proto.Field( + proto.INT64, + number=9, + oneof="kind", + ) + bytes_value: bytes = proto.Field( + proto.BYTES, + number=2, + oneof="kind", + ) + string_value: str = proto.Field( + proto.STRING, + number=3, + oneof="kind", + ) + int_value: int = proto.Field( + proto.INT64, + number=6, + oneof="kind", + ) + bool_value: bool = proto.Field( + proto.BOOL, + number=10, + oneof="kind", + ) + float_value: float = proto.Field( + proto.DOUBLE, + number=11, + oneof="kind", + ) + timestamp_value: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=12, + oneof="kind", + message=timestamp_pb2.Timestamp, + ) + date_value: date_pb2.Date = proto.Field( + proto.MESSAGE, + number=13, + oneof="kind", + message=date_pb2.Date, + ) + array_value: "ArrayValue" = proto.Field( + proto.MESSAGE, + number=4, + oneof="kind", + message="ArrayValue", + ) + + +class ArrayValue(proto.Message): + r"""``ArrayValue`` is an ordered list of ``Value``. + + Attributes: + values (MutableSequence[google.cloud.bigtable_v2.types.Value]): + The ordered elements in the array. + """ + + values: MutableSequence["Value"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Value", + ) + + class RowRange(proto.Message): r"""Specifies a contiguous range of rows. @@ -396,26 +573,26 @@ class RowFilter(proto.Message): transformers), as well as two ways to compose simple filters into more complex ones (chains and interleaves). They work as follows: - - True filters alter the input row by excluding some of its cells - wholesale from the output row. An example of a true filter is the - ``value_regex_filter``, which excludes cells whose values don't - match the specified pattern. All regex true filters use RE2 - syntax (https://github.com/google/re2/wiki/Syntax) in raw byte - mode (RE2::Latin1), and are evaluated as full matches. An - important point to keep in mind is that ``RE2(.)`` is equivalent - by default to ``RE2([^\n])``, meaning that it does not match - newlines. 
When attempting to match an arbitrary byte, you should - therefore use the escape sequence ``\C``, which may need to be - further escaped as ``\\C`` in your client language. - - - Transformers alter the input row by changing the values of some - of its cells in the output, without excluding them completely. - Currently, the only supported transformer is the - ``strip_value_transformer``, which replaces every cell's value - with the empty string. - - - Chains and interleaves are described in more detail in the - RowFilter.Chain and RowFilter.Interleave documentation. + - True filters alter the input row by excluding some of its cells + wholesale from the output row. An example of a true filter is the + ``value_regex_filter``, which excludes cells whose values don't + match the specified pattern. All regex true filters use RE2 syntax + (https://github.com/google/re2/wiki/Syntax) in raw byte mode + (RE2::Latin1), and are evaluated as full matches. An important + point to keep in mind is that ``RE2(.)`` is equivalent by default + to ``RE2([^\n])``, meaning that it does not match newlines. When + attempting to match an arbitrary byte, you should therefore use + the escape sequence ``\C``, which may need to be further escaped + as ``\\C`` in your client language. + + - Transformers alter the input row by changing the values of some of + its cells in the output, without excluding them completely. + Currently, the only supported transformer is the + ``strip_value_transformer``, which replaces every cell's value + with the empty string. + + - Chains and interleaves are described in more detail in the + RowFilter.Chain and RowFilter.Interleave documentation. The total serialized size of a RowFilter message must not exceed 20480 bytes, and RowFilters may not be nested within each other (in @@ -853,6 +1030,14 @@ class Mutation(proto.Message): set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell): Set a cell's value. + This field is a member of `oneof`_ ``mutation``. + add_to_cell (google.cloud.bigtable_v2.types.Mutation.AddToCell): + Incrementally updates an ``Aggregate`` cell. + + This field is a member of `oneof`_ ``mutation``. + merge_to_cell (google.cloud.bigtable_v2.types.Mutation.MergeToCell): + Merges accumulated state to an ``Aggregate`` cell. + This field is a member of `oneof`_ ``mutation``. delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): Deletes cells from a column. @@ -909,6 +1094,91 @@ class SetCell(proto.Message): number=4, ) + class AddToCell(proto.Message): + r"""A Mutation which incrementally updates a cell in an ``Aggregate`` + family. + + Attributes: + family_name (str): + The name of the ``Aggregate`` family into which new data + should be added. This must be a family with a ``value_type`` + of ``Aggregate``. Format: ``[-_.a-zA-Z0-9]+`` + column_qualifier (google.cloud.bigtable_v2.types.Value): + The qualifier of the column into which new data should be + added. This must be a ``raw_value``. + timestamp (google.cloud.bigtable_v2.types.Value): + The timestamp of the cell to which new data should be added. + This must be a ``raw_timestamp_micros`` that matches the + table's ``granularity``. + input (google.cloud.bigtable_v2.types.Value): + The input value to be accumulated into the specified cell. + This must be compatible with the family's + ``value_type.input_type``. 
+ """ + + family_name: str = proto.Field( + proto.STRING, + number=1, + ) + column_qualifier: "Value" = proto.Field( + proto.MESSAGE, + number=2, + message="Value", + ) + timestamp: "Value" = proto.Field( + proto.MESSAGE, + number=3, + message="Value", + ) + input: "Value" = proto.Field( + proto.MESSAGE, + number=4, + message="Value", + ) + + class MergeToCell(proto.Message): + r"""A Mutation which merges accumulated state into a cell in an + ``Aggregate`` family. + + Attributes: + family_name (str): + The name of the ``Aggregate`` family into which new data + should be added. This must be a family with a ``value_type`` + of ``Aggregate``. Format: ``[-_.a-zA-Z0-9]+`` + column_qualifier (google.cloud.bigtable_v2.types.Value): + The qualifier of the column into which new data should be + added. This must be a ``raw_value``. + timestamp (google.cloud.bigtable_v2.types.Value): + The timestamp of the cell to which new data should be added. + This must be a ``raw_timestamp_micros`` that matches the + table's ``granularity``. + input (google.cloud.bigtable_v2.types.Value): + The input value to be merged into the specified cell. This + must be compatible with the family's + ``value_type.state_type``. Merging ``NULL`` is allowed, but + has no effect. + """ + + family_name: str = proto.Field( + proto.STRING, + number=1, + ) + column_qualifier: "Value" = proto.Field( + proto.MESSAGE, + number=2, + message="Value", + ) + timestamp: "Value" = proto.Field( + proto.MESSAGE, + number=3, + message="Value", + ) + input: "Value" = proto.Field( + proto.MESSAGE, + number=4, + message="Value", + ) + class DeleteFromColumn(proto.Message): r"""A Mutation which deletes cells from the specified column, optionally restricting the deletions to a given timestamp range. @@ -964,6 +1234,18 @@ class DeleteFromRow(proto.Message): oneof="mutation", message=SetCell, ) + add_to_cell: AddToCell = proto.Field( + proto.MESSAGE, + number=5, + oneof="mutation", + message=AddToCell, + ) + merge_to_cell: MergeToCell = proto.Field( + proto.MESSAGE, + number=6, + oneof="mutation", + message=MergeToCell, + ) delete_from_column: DeleteFromColumn = proto.Field( proto.MESSAGE, number=2, @@ -1098,4 +1380,266 @@ class StreamContinuationToken(proto.Message): ) +class ProtoFormat(proto.Message): + r"""Protocol buffers format descriptor, as described by Messages + ProtoSchema and ProtoRows + + """ + + +class ColumnMetadata(proto.Message): + r"""Describes a column in a Bigtable Query Language result set. + + Attributes: + name (str): + The name of the column. + type_ (google.cloud.bigtable_v2.types.Type): + The type of the column. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + type_: types.Type = proto.Field( + proto.MESSAGE, + number=2, + message=types.Type, + ) + + +class ProtoSchema(proto.Message): + r"""ResultSet schema in proto format + + Attributes: + columns (MutableSequence[google.cloud.bigtable_v2.types.ColumnMetadata]): + The columns in the result set. + """ + + columns: MutableSequence["ColumnMetadata"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ColumnMetadata", + ) + + +class ResultSetMetadata(proto.Message): + r"""Describes the structure of a Bigtable result set. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + proto_schema (google.cloud.bigtable_v2.types.ProtoSchema): + Schema in proto format + + This field is a member of `oneof`_ ``schema``. 
+ """ + + proto_schema: "ProtoSchema" = proto.Field( + proto.MESSAGE, + number=1, + oneof="schema", + message="ProtoSchema", + ) + + +class ProtoRows(proto.Message): + r"""Rows represented in proto format. + + This should be constructed by concatenating the ``batch_data`` from + each of the relevant ``ProtoRowsBatch`` messages and parsing the + result as a ``ProtoRows`` message. + + Attributes: + values (MutableSequence[google.cloud.bigtable_v2.types.Value]): + A proto rows message consists of a list of values. Every N + complete values defines a row, where N is equal to the + number of entries in the ``metadata.proto_schema.columns`` + value received in the first response. + """ + + values: MutableSequence["Value"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Value", + ) + + +class ProtoRowsBatch(proto.Message): + r"""A part of a serialized ``ProtoRows`` message. + + Attributes: + batch_data (bytes): + Part of a serialized ``ProtoRows`` message. A complete, + parseable ProtoRows message is constructed by concatenating + ``batch_data`` from multiple ``ProtoRowsBatch`` messages. + The ``PartialResultSet`` that contains the last part has + ``complete_batch`` set to ``true``. + """ + + batch_data: bytes = proto.Field( + proto.BYTES, + number=1, + ) + + +class PartialResultSet(proto.Message): + r"""A partial result set from the streaming query API. Cloud Bigtable + clients buffer partial results received in this message until a + ``resume_token`` is received. + + The pseudocode below describes how to buffer and parse a stream of + ``PartialResultSet`` messages. + + Having: + + - queue of row results waiting to be returned ``queue`` + - extensible buffer of bytes ``buffer`` + - a place to keep track of the most recent ``resume_token`` for each + PartialResultSet ``p`` received { if p.reset { ensure ``queue`` is + empty ensure ``buffer`` is empty } if p.estimated_batch_size != 0 + { (optional) ensure ``buffer`` is sized to at least + ``p.estimated_batch_size`` } if ``p.proto_rows_batch`` is set { + append ``p.proto_rows_batch.bytes`` to ``buffer`` } if + p.batch_checksum is set and ``buffer`` is not empty { validate the + checksum matches the contents of ``buffer`` (see comments on + ``batch_checksum``) parse ``buffer`` as ``ProtoRows`` message, + clearing ``buffer`` add parsed rows to end of ``queue`` } if + p.resume_token is set { release results in ``queue`` save + ``p.resume_token`` in ``resume_token`` } } + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + proto_rows_batch (google.cloud.bigtable_v2.types.ProtoRowsBatch): + Partial rows in serialized ProtoRows format. + + This field is a member of `oneof`_ ``partial_rows``. + batch_checksum (int): + CRC32C checksum of concatenated ``partial_rows`` data for + the current batch. + + When present, the buffered data from ``partial_rows`` forms + a complete parseable message of the appropriate type. + + The client should mark the end of a parseable message and + prepare to receive a new one starting from the next + ``PartialResultSet`` message. Clients must verify the + checksum of the serialized batch before yielding it to the + caller. + + This does NOT mean the values can be yielded to the callers + since a ``resume_token`` is required to safely do so. + + If ``resume_token`` is non-empty and any data has been + received since the last one, this field is guaranteed to be + non-empty. 
In other words, clients may assume that a batch + will never cross a ``resume_token`` boundary. + + This field is a member of `oneof`_ ``_batch_checksum``. + resume_token (bytes): + An opaque token sent by the server to allow query resumption + and signal that the buffered values constructed from + received ``partial_rows`` can be yielded to the caller. + Clients can provide this token in a subsequent request to + resume the result stream from the current point. + + When ``resume_token`` is non-empty, the buffered values + received from ``partial_rows`` since the last non-empty + ``resume_token`` can be yielded to the callers, provided + that the client keeps the value of ``resume_token`` and uses + it on subsequent retries. + + A ``resume_token`` may be sent without information in + ``partial_rows`` to checkpoint the progress of a sparse + query. Any previous ``partial_rows`` data should still be + yielded in this case, and the new ``resume_token`` should be + saved for future retries as normal. + + A ``resume_token`` will only be sent on a boundary where + there is either no ongoing result batch, or + ``batch_checksum`` is also populated. + + The server will also send a sentinel ``resume_token`` when + last batch of ``partial_rows`` is sent. If the client + retries the ExecuteQueryRequest with the sentinel + ``resume_token``, the server will emit it again without any + data in ``partial_rows``, then return OK. + reset (bool): + If ``true``, any data buffered since the last non-empty + ``resume_token`` must be discarded before the other parts of + this message, if any, are handled. + estimated_batch_size (int): + Estimated size of the buffer required to hold the next batch + of results. + + This value will be sent with the first ``partial_rows`` of a + batch. That is, on the first ``partial_rows`` received in a + stream, on the first message after a ``batch_checksum`` + message, and any time ``reset`` is true. + + The client can use this estimate to allocate a buffer for + the next batch of results. This helps minimize the number of + allocations required, though the buffer size may still need + to be increased if the estimate is too low. + """ + + proto_rows_batch: "ProtoRowsBatch" = proto.Field( + proto.MESSAGE, + number=3, + oneof="partial_rows", + message="ProtoRowsBatch", + ) + batch_checksum: int = proto.Field( + proto.UINT32, + number=6, + optional=True, + ) + resume_token: bytes = proto.Field( + proto.BYTES, + number=5, + ) + reset: bool = proto.Field( + proto.BOOL, + number=7, + ) + estimated_batch_size: int = proto.Field( + proto.INT32, + number=4, + ) + + +class Idempotency(proto.Message): + r"""Parameters on mutations where clients want to ensure + idempotency (i.e. at-most-once semantics). This is currently + only needed for certain aggregate types. + + Attributes: + token (bytes): + Unique token used to identify replays of this + mutation. Must be at least 8 bytes long. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Client-assigned timestamp when the mutation's + first attempt was sent. Used to reject mutations + that arrive after idempotency protection may + have expired. May cause spurious rejections if + clock skew is too high. + + Leave unset or zero to always accept the + mutation, at the risk of double counting if the + protection for previous attempts has expired. 
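Stepping back to `PartialResultSet`: the buffering contract quoted in its comment is easier to follow as code. A rough Python rendering under stated assumptions: `google-crc32c` is assumed for the CRC32C check, `on_rows` and `on_token` are caller-supplied callbacks invented here, and this is a sketch of the contract, not client code:

```python
# Rough rendering of the PartialResultSet buffering pseudocode above.
import google_crc32c
from google.cloud.bigtable_v2 import types

queue: list = []      # parsed values waiting for a resume_token
buffer = bytearray()  # accumulates serialized ProtoRows bytes

def handle(p: types.PartialResultSet, on_rows, on_token) -> None:
    """Fold one PartialResultSet `p` into the client-side buffers."""
    if p.reset:                                  # discard all buffered state
        queue.clear()
        buffer.clear()
    # (optional) p.estimated_batch_size may be used to pre-size `buffer`
    if p.proto_rows_batch.batch_data:            # append this batch's bytes
        buffer.extend(p.proto_rows_batch.batch_data)
    if p.batch_checksum is not None and buffer:  # batch complete: verify, then parse
        if google_crc32c.value(bytes(buffer)) != p.batch_checksum:
            raise ValueError("batch checksum mismatch")
        queue.extend(types.ProtoRows.deserialize(bytes(buffer)).values)
        buffer.clear()
    if p.resume_token:                           # safe point: release buffered rows
        on_rows(list(queue))
        queue.clear()
        on_token(p.resume_token)
```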
+ """ + + token: bytes = proto.Field( + proto.BYTES, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py index 92ac5023d..2c8ea8732 100644 --- a/google/cloud/bigtable_v2/types/feature_flags.py +++ b/google/cloud/bigtable_v2/types/feature_flags.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -59,6 +59,26 @@ class FeatureFlags(proto.Message): Notify the server that the client supports the last_scanned_row field in ReadRowsResponse for long-running scans. + routing_cookie (bool): + Notify the server that the client supports + using encoded routing cookie strings to retry + requests with. + retry_info (bool): + Notify the server that the client supports + using retry info back off durations to retry + requests with. + client_side_metrics_enabled (bool): + Notify the server that the client has client + side metrics enabled. + traffic_director_enabled (bool): + Notify the server that the client using + Traffic Director endpoint. + direct_access_requested (bool): + Notify the server that the client explicitly + opted in for Direct Access. + peer_info (bool): + If the client can support using + BigtablePeerInfo. """ reverse_scans: bool = proto.Field( @@ -77,6 +97,30 @@ class FeatureFlags(proto.Message): proto.BOOL, number=4, ) + routing_cookie: bool = proto.Field( + proto.BOOL, + number=6, + ) + retry_info: bool = proto.Field( + proto.BOOL, + number=7, + ) + client_side_metrics_enabled: bool = proto.Field( + proto.BOOL, + number=8, + ) + traffic_director_enabled: bool = proto.Field( + proto.BOOL, + number=9, + ) + direct_access_requested: bool = proto.Field( + proto.BOOL, + number=10, + ) + peer_info: bool = proto.Field( + proto.BOOL, + number=11, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_v2/types/peer_info.py b/google/cloud/bigtable_v2/types/peer_info.py new file mode 100644 index 000000000..b3f1203cc --- /dev/null +++ b/google/cloud/bigtable_v2/types/peer_info.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "PeerInfo", + }, +) + + +class PeerInfo(proto.Message): + r"""PeerInfo contains information about the peer that the client + is connecting to. + + Attributes: + google_frontend_id (int): + An opaque identifier for the Google Frontend + which serviced this request. Only set when not + using DirectAccess. 
+ application_frontend_id (int): + An opaque identifier for the application + frontend which serviced this request. + application_frontend_zone (str): + The Cloud zone of the application frontend + that served this request. + application_frontend_subzone (str): + The subzone of the application frontend that + served this request, e.g. an identifier for + where within the zone the application frontend + is. + transport_type (google.cloud.bigtable_v2.types.PeerInfo.TransportType): + + """ + + class TransportType(proto.Enum): + r"""The transport type that the client used to connect to this + peer. + + Values: + TRANSPORT_TYPE_UNKNOWN (0): + The transport type is unknown. + TRANSPORT_TYPE_EXTERNAL (1): + The client connected to this peer via an + external network (e.g. outside Google Coud). + TRANSPORT_TYPE_CLOUD_PATH (2): + The client connected to this peer via + CloudPath. + TRANSPORT_TYPE_DIRECT_ACCESS (3): + The client connected to this peer via + DirectAccess. + TRANSPORT_TYPE_SESSION_UNKNOWN (4): + The client connected to this peer via + Bigtable Sessions using an unknown transport + type. + TRANSPORT_TYPE_SESSION_EXTERNAL (5): + The client connected to this peer via + Bigtable Sessions on an external network (e.g. + outside Google Cloud). + TRANSPORT_TYPE_SESSION_CLOUD_PATH (6): + The client connected to this peer via + Bigtable Sessions using CloudPath. + TRANSPORT_TYPE_SESSION_DIRECT_ACCESS (7): + The client connected to this peer via + Bigtable Sessions using DirectAccess. + """ + TRANSPORT_TYPE_UNKNOWN = 0 + TRANSPORT_TYPE_EXTERNAL = 1 + TRANSPORT_TYPE_CLOUD_PATH = 2 + TRANSPORT_TYPE_DIRECT_ACCESS = 3 + TRANSPORT_TYPE_SESSION_UNKNOWN = 4 + TRANSPORT_TYPE_SESSION_EXTERNAL = 5 + TRANSPORT_TYPE_SESSION_CLOUD_PATH = 6 + TRANSPORT_TYPE_SESSION_DIRECT_ACCESS = 7 + + google_frontend_id: int = proto.Field( + proto.INT64, + number=1, + ) + application_frontend_id: int = proto.Field( + proto.INT64, + number=2, + ) + application_frontend_zone: str = proto.Field( + proto.STRING, + number=3, + ) + application_frontend_subzone: str = proto.Field( + proto.STRING, + number=4, + ) + transport_type: TransportType = proto.Field( + proto.ENUM, + number=5, + enum=TransportType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_v2/types/request_stats.py b/google/cloud/bigtable_v2/types/request_stats.py index 61cce9491..540e6548d 100644 --- a/google/cloud/bigtable_v2/types/request_stats.py +++ b/google/cloud/bigtable_v2/types/request_stats.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -142,11 +142,10 @@ class FullReadStatsView(proto.Message): class RequestStats(proto.Message): - r"""RequestStats is the container for additional information pertaining - to a single request, helpful for evaluating the performance of the - sent request. Currently, there are the following supported methods: - - - google.bigtable.v2.ReadRows + r"""RequestStats is the container for additional information + pertaining to a single request, helpful for evaluating the + performance of the sent request. Currently, the following method + is supported: google.bigtable.v2.ReadRows .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields diff --git a/google/cloud/bigtable_v2/types/response_params.py b/google/cloud/bigtable_v2/types/response_params.py index 98e3a67db..cc6384ab3 100644 --- a/google/cloud/bigtable_v2/types/response_params.py +++ b/google/cloud/bigtable_v2/types/response_params.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,10 +29,7 @@ class ResponseParams(proto.Message): - r"""Response metadata proto This is an experimental feature that will be - used to get zone_id and cluster_id from response trailers to tag the - metrics. This should not be used by customers directly - + r"""Response metadata proto .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -47,6 +44,11 @@ class ResponseParams(proto.Message): of bigtable resources. This field is a member of `oneof`_ ``_cluster_id``. + afe_id (int): + The AFE ID for the AFE that is served this + request. + + This field is a member of `oneof`_ ``_afe_id``. """ zone_id: str = proto.Field( @@ -59,6 +61,11 @@ class ResponseParams(proto.Message): number=2, optional=True, ) + afe_id: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_v2/types/types.py b/google/cloud/bigtable_v2/types/types.py new file mode 100644 index 000000000..0b4ddb57a --- /dev/null +++ b/google/cloud/bigtable_v2/types/types.py @@ -0,0 +1,875 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "Type", + }, +) + + +class Type(proto.Message): + r"""``Type`` represents the type of data that is written to, read from, + or stored in Bigtable. It is heavily based on the GoogleSQL standard + to help maintain familiarity and consistency across products and + features. + + For compatibility with Bigtable's existing untyped APIs, each + ``Type`` includes an ``Encoding`` which describes how to convert to + or from the underlying data. + + Each encoding can operate in one of two modes: + + - Sorted: In this mode, Bigtable guarantees that + ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is + useful anywhere sort order is important, for example when encoding + keys. + - Distinct: In this mode, Bigtable guarantees that if ``X != Y`` + then ``Encode(X) != Encode(Y)``. However, the converse is not + guaranteed. For example, both ``{'foo': '1', 'bar': '2'}`` and + ``{'bar': '2', 'foo': '1'}`` are valid encodings of the same JSON + value. 
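To make the sorted-mode guarantee concrete, a small illustration (mine, not from the proto comments) using the 8-byte big-endian layout that `Int64.Encoding.BigEndianBytes` specifies later in this file:

```python
# Sorted mode means Encode(X) <= Encode(Y) iff X <= Y. Big-endian two's
# complement satisfies this for non-negative values but not across signs,
# which is why BigEndianBytes only supports non-negative values when sorted.
import struct

def enc(x: int) -> bytes:
    return struct.pack(">q", x)  # 8-byte big-endian two's complement

assert (enc(1) <= enc(2)) == (1 <= 2)    # order preserved for non-negatives
assert (enc(-1) <= enc(1)) != (-1 <= 1)  # ...but broken once signs differ
```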
+ + The API clearly documents which mode is used wherever an encoding + can be configured. Each encoding also documents which values are + supported in which modes. For example, when encoding INT64 as a + numeric STRING, negative numbers cannot be encoded in sorted mode. + This is because ``INT64(1) > INT64(-1)``, but + ``STRING("-00001") > STRING("00001")``. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bytes_type (google.cloud.bigtable_v2.types.Type.Bytes): + Bytes + + This field is a member of `oneof`_ ``kind``. + string_type (google.cloud.bigtable_v2.types.Type.String): + String + + This field is a member of `oneof`_ ``kind``. + int64_type (google.cloud.bigtable_v2.types.Type.Int64): + Int64 + + This field is a member of `oneof`_ ``kind``. + float32_type (google.cloud.bigtable_v2.types.Type.Float32): + Float32 + + This field is a member of `oneof`_ ``kind``. + float64_type (google.cloud.bigtable_v2.types.Type.Float64): + Float64 + + This field is a member of `oneof`_ ``kind``. + bool_type (google.cloud.bigtable_v2.types.Type.Bool): + Bool + + This field is a member of `oneof`_ ``kind``. + timestamp_type (google.cloud.bigtable_v2.types.Type.Timestamp): + Timestamp + + This field is a member of `oneof`_ ``kind``. + date_type (google.cloud.bigtable_v2.types.Type.Date): + Date + + This field is a member of `oneof`_ ``kind``. + aggregate_type (google.cloud.bigtable_v2.types.Type.Aggregate): + Aggregate + + This field is a member of `oneof`_ ``kind``. + struct_type (google.cloud.bigtable_v2.types.Type.Struct): + Struct + + This field is a member of `oneof`_ ``kind``. + array_type (google.cloud.bigtable_v2.types.Type.Array): + Array + + This field is a member of `oneof`_ ``kind``. + map_type (google.cloud.bigtable_v2.types.Type.Map): + Map + + This field is a member of `oneof`_ ``kind``. + proto_type (google.cloud.bigtable_v2.types.Type.Proto): + Proto + + This field is a member of `oneof`_ ``kind``. + enum_type (google.cloud.bigtable_v2.types.Type.Enum): + Enum + + This field is a member of `oneof`_ ``kind``. + """ + + class Bytes(proto.Message): + r"""Bytes Values of type ``Bytes`` are stored in ``Value.bytes_value``. + + Attributes: + encoding (google.cloud.bigtable_v2.types.Type.Bytes.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + raw (google.cloud.bigtable_v2.types.Type.Bytes.Encoding.Raw): + Use ``Raw`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Raw(proto.Message): + r"""Leaves the value as-is. + + Sorted mode: all values are supported. + + Distinct mode: all values are supported. + + Attributes: + escape_nulls (bool): + If set, allows NULL values to be encoded as the empty string + "". + + The actual empty string, or any value which only contains + the null byte ``0x00``, has one more null byte appended. 
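A sketch of that `escape_nulls` rule as I read it; the helper below is invented, and only the transformation it performs comes from the comment above:

```python
# Invented helper mirroring the escape_nulls comment: NULL encodes as "",
# and the empty string or any all-0x00 value gets one more null byte so
# distinct inputs stay distinct.
def encode_raw_escaped(value: bytes | None) -> bytes:
    if value is None:
        return b""
    if value == b"" or set(value) == {0}:
        return value + b"\x00"
    return value

assert encode_raw_escaped(None) == b""
assert encode_raw_escaped(b"") == b"\x00"
assert encode_raw_escaped(b"\x00\x00") == b"\x00\x00\x00"
assert encode_raw_escaped(b"abc") == b"abc"
```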
+ """ + + escape_nulls: bool = proto.Field( + proto.BOOL, + number=1, + ) + + raw: "Type.Bytes.Encoding.Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Bytes.Encoding.Raw", + ) + + encoding: "Type.Bytes.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Bytes.Encoding", + ) + + class String(proto.Message): + r"""String Values of type ``String`` are stored in + ``Value.string_value``. + + Attributes: + encoding (google.cloud.bigtable_v2.types.Type.String.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + utf8_raw (google.cloud.bigtable_v2.types.Type.String.Encoding.Utf8Raw): + Deprecated: if set, converts to an empty ``utf8_bytes``. + + This field is a member of `oneof`_ ``encoding``. + utf8_bytes (google.cloud.bigtable_v2.types.Type.String.Encoding.Utf8Bytes): + Use ``Utf8Bytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Utf8Raw(proto.Message): + r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" + + class Utf8Bytes(proto.Message): + r"""UTF-8 encoding. + + Sorted mode: + + - All values are supported. + - Code point order is preserved. + + Distinct mode: all values are supported. + + Compatible with: + + - BigQuery ``TEXT`` encoding + - HBase ``Bytes.toBytes`` + - Java ``String#getBytes(StandardCharsets.UTF_8)`` + + Attributes: + null_escape_char (str): + Single-character escape sequence used to support NULL + values. + + If set, allows NULL values to be encoded as the empty string + "". + + The actual empty string, or any value where every character + equals ``null_escape_char``, has one more + ``null_escape_char`` appended. + + If ``null_escape_char`` is set and does not equal the ASCII + null character ``0x00``, then the encoding will not support + sorted mode. + + . + """ + + null_escape_char: str = proto.Field( + proto.STRING, + number=1, + ) + + utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.String.Encoding.Utf8Raw", + ) + utf8_bytes: "Type.String.Encoding.Utf8Bytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.String.Encoding.Utf8Bytes", + ) + + encoding: "Type.String.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.String.Encoding", + ) + + class Int64(proto.Message): + r"""Int64 Values of type ``Int64`` are stored in ``Value.int_value``. + + Attributes: + encoding (google.cloud.bigtable_v2.types.Type.Int64.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + big_endian_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.BigEndianBytes): + Use ``BigEndianBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + ordered_code_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.OrderedCodeBytes): + Use ``OrderedCodeBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class BigEndianBytes(proto.Message): + r"""Encodes the value as an 8-byte big-endian two's complement value. + + Sorted mode: non-negative values are supported. + + Distinct mode: all values are supported. + + Compatible with: + + - BigQuery ``BINARY`` encoding + - HBase ``Bytes.toBytes`` + - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` + + Attributes: + bytes_type (google.cloud.bigtable_v2.types.Type.Bytes): + Deprecated: ignored if set. + """ + + bytes_type: "Type.Bytes" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Bytes", + ) + + class OrderedCodeBytes(proto.Message): + r"""Encodes the value in a variable length binary format of up to + 10 bytes. Values that are closer to zero use fewer bytes. + + Sorted mode: all values are supported. + + Distinct mode: all values are supported. + + """ + + big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Int64.Encoding.BigEndianBytes", + ) + ordered_code_bytes: "Type.Int64.Encoding.OrderedCodeBytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.Int64.Encoding.OrderedCodeBytes", + ) + + encoding: "Type.Int64.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Int64.Encoding", + ) + + class Bool(proto.Message): + r"""bool Values of type ``Bool`` are stored in ``Value.bool_value``.""" + + class Float32(proto.Message): + r"""Float32 Values of type ``Float32`` are stored in + ``Value.float_value``. + + """ + + class Float64(proto.Message): + r"""Float64 Values of type ``Float64`` are stored in + ``Value.float_value``. + + """ + + class Timestamp(proto.Message): + r"""Timestamp Values of type ``Timestamp`` are stored in + ``Value.timestamp_value``. + + Attributes: + encoding (google.cloud.bigtable_v2.types.Type.Timestamp.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + unix_micros_int64 (google.cloud.bigtable_v2.types.Type.Int64.Encoding): + Encodes the number of microseconds since the Unix epoch + using the given ``Int64`` encoding. Values must be + microsecond-aligned. + + Compatible with: + + - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS`` + + This field is a member of `oneof`_ ``encoding``. + """ + + unix_micros_int64: "Type.Int64.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Int64.Encoding", + ) + + encoding: "Type.Timestamp.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Timestamp.Encoding", + ) + + class Date(proto.Message): + r"""Date Values of type ``Date`` are stored in ``Value.date_value``.""" + + class Struct(proto.Message): + r"""A structured data value, consisting of fields which map to + dynamically typed values. 
Values of type ``Struct`` are stored in + ``Value.array_value`` where entries are in the same order and number + as ``field_types``. + + Attributes: + fields (MutableSequence[google.cloud.bigtable_v2.types.Type.Struct.Field]): + The names and types of the fields in this + struct. + encoding (google.cloud.bigtable_v2.types.Type.Struct.Encoding): + The encoding to use when converting to or + from lower level types. + """ + + class Field(proto.Message): + r"""A struct field and its type. + + Attributes: + field_name (str): + The field name (optional). Fields without a ``field_name`` + are considered anonymous and cannot be referenced by name. + type_ (google.cloud.bigtable_v2.types.Type): + The type of values in this field. + """ + + field_name: str = proto.Field( + proto.STRING, + number=1, + ) + type_: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + singleton (google.cloud.bigtable_v2.types.Type.Struct.Encoding.Singleton): + Use ``Singleton`` encoding. + + This field is a member of `oneof`_ ``encoding``. + delimited_bytes (google.cloud.bigtable_v2.types.Type.Struct.Encoding.DelimitedBytes): + Use ``DelimitedBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + ordered_code_bytes (google.cloud.bigtable_v2.types.Type.Struct.Encoding.OrderedCodeBytes): + User ``OrderedCodeBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Singleton(proto.Message): + r"""Uses the encoding of ``fields[0].type`` as-is. Only valid if + ``fields.size == 1``. + + """ + + class DelimitedBytes(proto.Message): + r"""Fields are encoded independently and concatenated with a + configurable ``delimiter`` in between. + + A struct with no fields defined is encoded as a single + ``delimiter``. + + Sorted mode: + + - Fields are encoded in sorted mode. + - Encoded field values must not contain any bytes <= + ``delimiter[0]`` + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. + + Distinct mode: + + - Fields are encoded in distinct mode. + - Encoded field values must not contain ``delimiter[0]``. + + Attributes: + delimiter (bytes): + Byte sequence used to delimit concatenated + fields. The delimiter must contain at least 1 + character and at most 50 characters. + """ + + delimiter: bytes = proto.Field( + proto.BYTES, + number=1, + ) + + class OrderedCodeBytes(proto.Message): + r"""Fields are encoded independently and concatenated with the fixed + byte pair ``{0x00, 0x01}`` in between. + + Any null ``(0x00)`` byte in an encoded field is replaced by the + fixed byte pair ``{0x00, 0xFF}``. + + Fields that encode to the empty string "" have special handling: + + - If *every* field encodes to "", or if the STRUCT has no fields + defined, then the STRUCT is encoded as the fixed byte pair + ``{0x00, 0x00}``. + - Otherwise, the STRUCT only encodes until the last non-empty field, + omitting any trailing empty fields. Any empty fields that aren't + omitted are replaced with the fixed byte pair ``{0x00, 0x00}``. 
+ + Examples: + + :: + + - STRUCT() -> "\00\00" + - STRUCT("") -> "\00\00" + - STRUCT("", "") -> "\00\00" + - STRUCT("", "B") -> "\00\00" + "\00\01" + "B" + - STRUCT("A", "") -> "A" + - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B" + - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" + "C" + + Since null bytes are always escaped, this encoding can cause size + blowup for encodings like ``Int64.BigEndianBytes`` that are likely + to produce many such bytes. + + Sorted mode: + + - Fields are encoded in sorted mode. + - All values supported by the field encodings are allowed + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. + + Distinct mode: + + - Fields are encoded in distinct mode. + - All values supported by the field encodings are allowed. + + """ + + singleton: "Type.Struct.Encoding.Singleton" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Struct.Encoding.Singleton", + ) + delimited_bytes: "Type.Struct.Encoding.DelimitedBytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.Struct.Encoding.DelimitedBytes", + ) + ordered_code_bytes: "Type.Struct.Encoding.OrderedCodeBytes" = proto.Field( + proto.MESSAGE, + number=3, + oneof="encoding", + message="Type.Struct.Encoding.OrderedCodeBytes", + ) + + fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Type.Struct.Field", + ) + encoding: "Type.Struct.Encoding" = proto.Field( + proto.MESSAGE, + number=2, + message="Type.Struct.Encoding", + ) + + class Proto(proto.Message): + r"""A protobuf message type. Values of type ``Proto`` are stored in + ``Value.bytes_value``. + + Attributes: + schema_bundle_id (str): + The ID of the schema bundle that this proto + is defined in. + message_name (str): + The fully qualified name of the protobuf + message, including package. In the format of + "foo.bar.Message". + """ + + schema_bundle_id: str = proto.Field( + proto.STRING, + number=1, + ) + message_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class Enum(proto.Message): + r"""A protobuf enum type. Values of type ``Enum`` are stored in + ``Value.int_value``. + + Attributes: + schema_bundle_id (str): + The ID of the schema bundle that this enum is + defined in. + enum_name (str): + The fully qualified name of the protobuf enum + message, including package. In the format of + "foo.bar.EnumMessage". + """ + + schema_bundle_id: str = proto.Field( + proto.STRING, + number=1, + ) + enum_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class Array(proto.Message): + r"""An ordered list of elements of a given type. Values of type + ``Array`` are stored in ``Value.array_value``. + + Attributes: + element_type (google.cloud.bigtable_v2.types.Type): + The type of the elements in the array. This must not be + ``Array``. + """ + + element_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + + class Map(proto.Message): + r"""A mapping of keys to values of a given type. Values of type ``Map`` + are stored in a ``Value.array_value`` where each entry is another + ``Value.array_value`` with two elements (the key and the value, in + that order). Normally encoded Map values won't have repeated keys, + however, clients are expected to handle the case in which they do. + If the same key appears multiple times, the *last* value takes + precedence. 
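Before the `Map` field list, a hedged sketch (contents invented) of the nested-array wire shape just described, where each map entry is a two-element `[key, value]` array:

```python
# Illustrative only: the wire shape of a single-entry Map value per the
# comment above, i.e. an array_value of [key, value] pair arrays.
from google.cloud.bigtable_v2 import types

map_value = types.Value(
    array_value=types.ArrayValue(
        values=[
            types.Value(
                array_value=types.ArrayValue(
                    values=[
                        types.Value(string_value="k"),  # key
                        types.Value(int_value=1),       # value
                    ]
                )
            ),
        ]
    )
)
```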
+ + Attributes: + key_type (google.cloud.bigtable_v2.types.Type): + The type of a map key. Only ``Bytes``, ``String``, and + ``Int64`` are allowed as key types. + value_type (google.cloud.bigtable_v2.types.Type): + The type of the values in a map. + """ + + key_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + value_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + + class Aggregate(proto.Message): + r"""A value that combines incremental updates into a summarized value. + + Data is never directly written or read using type ``Aggregate``. + Writes provide either the ``input_type`` or ``state_type``, and + reads always return the ``state_type`` . + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + input_type (google.cloud.bigtable_v2.types.Type): + Type of the inputs that are accumulated by this + ``Aggregate``. Use ``AddInput`` mutations to accumulate new + inputs. + state_type (google.cloud.bigtable_v2.types.Type): + Output only. Type that holds the internal accumulator state + for the ``Aggregate``. This is a function of the + ``input_type`` and ``aggregator`` chosen. + sum (google.cloud.bigtable_v2.types.Type.Aggregate.Sum): + Sum aggregator. + + This field is a member of `oneof`_ ``aggregator``. + hllpp_unique_count (google.cloud.bigtable_v2.types.Type.Aggregate.HyperLogLogPlusPlusUniqueCount): + HyperLogLogPlusPlusUniqueCount aggregator. + + This field is a member of `oneof`_ ``aggregator``. + max_ (google.cloud.bigtable_v2.types.Type.Aggregate.Max): + Max aggregator. + + This field is a member of `oneof`_ ``aggregator``. + min_ (google.cloud.bigtable_v2.types.Type.Aggregate.Min): + Min aggregator. + + This field is a member of `oneof`_ ``aggregator``. + """ + + class Sum(proto.Message): + r"""Computes the sum of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class Max(proto.Message): + r"""Computes the max of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class Min(proto.Message): + r"""Computes the min of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class HyperLogLogPlusPlusUniqueCount(proto.Message): + r"""Computes an approximate unique count over the input values. When + using raw data as input, be careful to use a consistent encoding. + Otherwise the same value encoded differently could count more than + once, or two distinct values could count as identical. 
Input: Any, + or omit for Raw State: TBD Special state conversions: ``Int64`` (the + unique count estimate) + + """ + + input_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + state_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + sum: "Type.Aggregate.Sum" = proto.Field( + proto.MESSAGE, + number=4, + oneof="aggregator", + message="Type.Aggregate.Sum", + ) + hllpp_unique_count: "Type.Aggregate.HyperLogLogPlusPlusUniqueCount" = ( + proto.Field( + proto.MESSAGE, + number=5, + oneof="aggregator", + message="Type.Aggregate.HyperLogLogPlusPlusUniqueCount", + ) + ) + max_: "Type.Aggregate.Max" = proto.Field( + proto.MESSAGE, + number=6, + oneof="aggregator", + message="Type.Aggregate.Max", + ) + min_: "Type.Aggregate.Min" = proto.Field( + proto.MESSAGE, + number=7, + oneof="aggregator", + message="Type.Aggregate.Min", + ) + + bytes_type: Bytes = proto.Field( + proto.MESSAGE, + number=1, + oneof="kind", + message=Bytes, + ) + string_type: String = proto.Field( + proto.MESSAGE, + number=2, + oneof="kind", + message=String, + ) + int64_type: Int64 = proto.Field( + proto.MESSAGE, + number=5, + oneof="kind", + message=Int64, + ) + float32_type: Float32 = proto.Field( + proto.MESSAGE, + number=12, + oneof="kind", + message=Float32, + ) + float64_type: Float64 = proto.Field( + proto.MESSAGE, + number=9, + oneof="kind", + message=Float64, + ) + bool_type: Bool = proto.Field( + proto.MESSAGE, + number=8, + oneof="kind", + message=Bool, + ) + timestamp_type: Timestamp = proto.Field( + proto.MESSAGE, + number=10, + oneof="kind", + message=Timestamp, + ) + date_type: Date = proto.Field( + proto.MESSAGE, + number=11, + oneof="kind", + message=Date, + ) + aggregate_type: Aggregate = proto.Field( + proto.MESSAGE, + number=6, + oneof="kind", + message=Aggregate, + ) + struct_type: Struct = proto.Field( + proto.MESSAGE, + number=7, + oneof="kind", + message=Struct, + ) + array_type: Array = proto.Field( + proto.MESSAGE, + number=3, + oneof="kind", + message=Array, + ) + map_type: Map = proto.Field( + proto.MESSAGE, + number=4, + oneof="kind", + message=Map, + ) + proto_type: Proto = proto.Field( + proto.MESSAGE, + number=13, + oneof="kind", + message=Proto, + ) + enum_type: Enum = proto.Field( + proto.MESSAGE, + number=14, + oneof="kind", + message=Enum, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/mypy.ini b/mypy.ini index f12ed46fc..701b7587c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,6 +1,9 @@ [mypy] -python_version = 3.6 +python_version = 3.13 namespace_packages = True +check_untyped_defs = True +warn_unreachable = True +disallow_any_generics = True exclude = tests/unit/gapic/ [mypy-grpc.*] @@ -26,3 +29,10 @@ ignore_missing_imports = True [mypy-pytest] ignore_missing_imports = True + +[mypy-google.cloud.*] +ignore_errors = True + +# only verify data client +[mypy-google.cloud.bigtable.data.*] +ignore_errors = False diff --git a/noxfile.py b/noxfile.py index b2820b309..8df24410c 100644 --- a/noxfile.py +++ b/noxfile.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,55 +14,80 @@ # See the License for the specific language governing permissions and # limitations under the License. 
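Looking back at the `types.py` hunk, declaring an aggregate column type might look like the following. This is hypothetical usage, not code from the diff; `state_type` is output only and therefore omitted:

```python
# Hypothetical sketch: declaring an Int64 Sum aggregate with the new Type
# message from the types.py hunk above.
from google.cloud.bigtable_v2 import types

agg = types.Type(
    aggregate_type=types.Type.Aggregate(
        input_type=types.Type(int64_type=types.Type.Int64()),
        sum=types.Type.Aggregate.Sum(),  # selects the `aggregator` oneof
    )
)
```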
+# DO NOT EDIT THIS FILE OUTSIDE OF `.librarian/generator-input` +# The source of truth for this file is `.librarian/generator-input` + + # Generated by synthtool. DO NOT EDIT! from __future__ import absolute_import + import os import pathlib import re import shutil +from typing import Dict, List import warnings import nox FLAKE8_VERSION = "flake8==6.1.0" -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" -LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] - -DEFAULT_PYTHON_VERSION = "3.8" - -UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +BLACK_VERSION = "black[jupyter]==23.3.0" +ISORT_VERSION = "isort==5.11.0" +LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"] + +DEFAULT_PYTHON_VERSION = "3.13" + +UNIT_TEST_PYTHON_VERSIONS: List[str] = [ + "3.7", + "3.8", + "3.9", + "3.10", + "3.11", + "3.12", + "3.13", + "3.14", +] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", "pytest", "pytest-cov", "pytest-asyncio", + BLACK_VERSION, + "autoflake", ] -UNIT_TEST_EXTERNAL_DEPENDENCIES = [] -UNIT_TEST_LOCAL_DEPENDENCIES = [] -UNIT_TEST_DEPENDENCIES = [] -UNIT_TEST_EXTRAS = [] -UNIT_TEST_EXTRAS_BY_PYTHON = {} - -SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -SYSTEM_TEST_STANDARD_DEPENDENCIES = [ +UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_DEPENDENCIES: List[str] = [] +UNIT_TEST_EXTRAS: List[str] = [] +UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.10", "3.14"] +SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ "mock", "pytest", "google-cloud-testutils", ] -SYSTEM_TEST_EXTERNAL_DEPENDENCIES = [] -SYSTEM_TEST_LOCAL_DEPENDENCIES = [] -SYSTEM_TEST_DEPENDENCIES = [] -SYSTEM_TEST_EXTRAS = [] -SYSTEM_TEST_EXTRAS_BY_PYTHON = {} +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [ + "pytest-asyncio==0.21.2", + BLACK_VERSION, + "pyyaml==6.0.2", +] +SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTRAS: List[str] = [] +SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() # 'docfx' is excluded since it only needs to run in 'docs-presubmit' nox.options.sessions = [ - "unit", + "unit-3.10", + "unit-3.11", + "unit-3.12", + "unit-3.13", + "unit-3.14", "system_emulated", "system", "mypy", @@ -71,6 +96,7 @@ "lint_setup_py", "blacken", "docs", + "format", ] # Error if a python version is missing @@ -131,14 +157,13 @@ def mypy(session): "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests" ) session.install("google-cloud-testutils") - # TODO: also verify types on tests, all of google package - session.run("mypy", "google/", "tests/") + session.run("mypy", "-p", "google.cloud.bigtable.data") @nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" - session.install("docutils", "pygments") + session.install("setuptools", "docutils", "pygments") session.run("python", "setup.py", "check", "--restructuredtext", "--strict") @@ -170,14 +195,28 @@ def install_unittest_dependencies(session, *constraints): session.install("-e", ".", *constraints) -def default(session): +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) +@nox.parametrize( + "protobuf_implementation", + ["python", "upb", "cpp"], +) +def unit(session, protobuf_implementation): # Install all test dependencies, then install this package in-place. 
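+    # (Editor's note, not part of the original change.) With the parametrize
+    # decorator above, nox expands each unit session into named cells like
+    # "unit-3.12(protobuf_implementation='upb')"; a single cell of the matrix
+    # can be run directly, e.g.:
+    #     nox -s "unit-3.12(protobuf_implementation='upb')"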
+ py_version = tuple([int(v) for v in session.python.split(".")]) + if protobuf_implementation == "cpp" and py_version >= (3, 11): + session.skip("cpp implementation is not supported in python 3.11+") constraints_path = str( CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) install_unittest_dependencies(session, "-c", constraints_path) + # TODO(https://github.com/googleapis/synthtool/issues/1976): + # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped. + # The 'cpp' implementation requires Protobuf<4. + if protobuf_implementation == "cpp": + session.install("protobuf<4") + # Run py.test against the unit tests. session.run( "py.test", @@ -191,17 +230,13 @@ def default(session): "--cov-fail-under=0", os.path.join("tests", "unit"), *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, ) -@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) -def unit(session): - """Run the unit test suite.""" - default(session) - - def install_systemtest_dependencies(session, *constraints): - # Use pre-release gRPC for system tests. # Exclude version 1.52.0rc1 which has a known issue. # See https://github.com/grpc/grpc/issues/32163 @@ -231,7 +266,7 @@ def install_systemtest_dependencies(session, *constraints): session.install("-e", ".", *constraints) -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +@nox.session(python=DEFAULT_PYTHON_VERSION) def system_emulated(session): import subprocess import signal @@ -258,6 +293,25 @@ def system_emulated(session): os.killpg(os.getpgid(p.pid), signal.SIGKILL) +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +@nox.parametrize("client_type", ["async", "sync", "legacy"]) +def conformance(session, client_type): + # install dependencies + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + install_unittest_dependencies(session, "-c", constraints_path) + with session.chdir("test_proxy"): + # download the conformance test suite + session.run( + "bash", + "-e", + "run_tests.sh", + external=True, + env={"CLIENT_TYPE": client_type}, + ) + + @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" @@ -309,18 +363,27 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") + session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") -@nox.session(python="3.9") +@nox.session(python="3.10") def docs(session): """Build the docs for this library.""" session.install("-e", ".") session.install( - "sphinx==4.0.1", + # We need to pin to specific versions of the `sphinxcontrib-*` packages + # which still support sphinx 4.x. + # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 + # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. + "sphinxcontrib-applehelp==1.0.4", + "sphinxcontrib-devhelp==1.0.2", + "sphinxcontrib-htmlhelp==2.0.1", + "sphinxcontrib-qthelp==1.0.3", + "sphinxcontrib-serializinghtml==1.1.5", + "sphinx==4.5.0", "alabaster", "recommonmark", ) @@ -340,12 +403,21 @@ def docs(session): ) -@nox.session(python="3.9") +@nox.session(python="3.10") def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") session.install( + # We need to pin to specific versions of the `sphinxcontrib-*` packages + # which still support sphinx 4.x. 
+ # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 + # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. + "sphinxcontrib-applehelp==1.0.4", + "sphinxcontrib-devhelp==1.0.2", + "sphinxcontrib-htmlhelp==2.0.1", + "sphinxcontrib-qthelp==1.0.3", + "sphinxcontrib-serializinghtml==1.1.5", "gcp-sphinx-docfx-yaml", "alabaster", "recommonmark", @@ -375,12 +447,23 @@ def docfx(session): os.path.join("docs", ""), os.path.join("docs", "_build", "html", ""), ) + # Customization: Add extra sections to the table of contents for the Classic vs Async clients + session.install("pyyaml") + session.run("python", "docs/scripts/patch_devsite_toc.py") -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -def prerelease_deps(session): +@nox.session(python="3.14") +@nox.parametrize( + "protobuf_implementation", + ["python", "upb", "cpp"], +) +def prerelease_deps(session, protobuf_implementation): """Run all tests with prerelease versions of dependencies installed.""" + py_version = tuple([int(v) for v in session.python.split(".")]) + if protobuf_implementation == "cpp" and py_version >= (3, 11): + session.skip("cpp implementation is not supported in python 3.11+") + # Install all dependencies session.install("-e", ".[all, tests, tracing]") unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES @@ -415,9 +498,9 @@ def prerelease_deps(session): "protobuf", # dependency of grpc "six", + "grpc-google-iam-v1", "googleapis-common-protos", - # Exclude version 1.52.0rc1 which has a known issue. See https://github.com/grpc/grpc/issues/32163 - "grpcio!=1.52.0rc1", + "grpcio", "grpcio-status", "google-api-core", "google-auth", @@ -433,6 +516,7 @@ def prerelease_deps(session): # Remaining dependencies other_deps = [ "requests", + "cryptography", ] session.install(*other_deps) @@ -443,7 +527,13 @@ def prerelease_deps(session): session.run("python", "-c", "import grpc; print(grpc.__version__)") session.run("python", "-c", "import google.auth; print(google.auth.__version__)") - session.run("py.test", "tests/unit") + session.run( + "py.test", + "tests/unit", + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") @@ -456,6 +546,9 @@ def prerelease_deps(session): f"--junitxml=system_{session.python}_sponge_log.xml", system_test_path, *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, ) if os.path.exists(system_test_folder_path): session.run( @@ -464,4 +557,17 @@ def prerelease_deps(session): f"--junitxml=system_{session.python}_sponge_log.xml", system_test_folder_path, *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, ) + + +@nox.session(python="3.10") +def generate_sync(session): + """ + Re-generate sync files for the library from CrossSync-annotated async source + """ + session.install(BLACK_VERSION) + session.install("autoflake") + session.run("python", ".cross_sync/generate.py", ".") diff --git a/owlbot.py b/owlbot.py deleted file mode 100644 index 78c6ca2f8..000000000 --- a/owlbot.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This script is used to synthesize generated parts of this library.""" - -from pathlib import Path -import re -from typing import List, Optional - -import synthtool as s -from synthtool import gcp -from synthtool.languages import python - -common = gcp.CommonTemplates() - -# This is a customized version of the s.get_staging_dirs() function from synthtool to -# cater for copying 2 different folders from googleapis-gen -# which are bigtable and bigtable/admin. -# Source https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280 -def get_staging_dirs( - default_version: Optional[str] = None, sub_directory: Optional[str] = None -) -> List[Path]: - """Returns the list of directories, one per version, copied from - https://github.com/googleapis/googleapis-gen. Will return in lexical sorting - order with the exception of the default_version which will be last (if specified). - - Args: - default_version (str): the default version of the API. The directory for this version - will be the last item in the returned list if specified. - sub_directory (str): if a `sub_directory` is provided, only the directories within the - specified `sub_directory` will be returned. - - Returns: the empty list if no file were copied. - """ - - staging = Path("owl-bot-staging") - - if sub_directory: - staging /= sub_directory - - if staging.is_dir(): - # Collect the subdirectories of the staging directory. - versions = [v.name for v in staging.iterdir() if v.is_dir()] - # Reorder the versions so the default version always comes last. 
- versions = [v for v in versions if v != default_version] - versions.sort() - if default_version is not None: - versions += [default_version] - dirs = [staging / v for v in versions] - for dir in dirs: - s._tracked_paths.add(dir) - return dirs - else: - return [] - -# This library ships clients for two different APIs, -# BigTable and BigTable Admin -bigtable_default_version = "v2" -bigtable_admin_default_version = "v2" - -for library in get_staging_dirs(bigtable_default_version, "bigtable"): - s.move(library / "google/cloud/bigtable_v2", excludes=["**/gapic_version.py"]) - s.move(library / "tests") - s.move(library / "scripts") - -for library in get_staging_dirs(bigtable_admin_default_version, "bigtable_admin"): - s.move(library / "google/cloud/bigtable_admin", excludes=["**/gapic_version.py"]) - s.move(library / "google/cloud/bigtable_admin_v2", excludes=["**/gapic_version.py"]) - s.move(library / "tests") - s.move(library / "scripts") - -s.remove_staging_dirs() - -# ---------------------------------------------------------------------------- -# Add templated files -# ---------------------------------------------------------------------------- -templated_files = common.py_library( - samples=True, # set to True only if there are samples - split_system_tests=True, - microgenerator=True, - cov_level=100, -) - -s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml"]) - -# ---------------------------------------------------------------------------- -# Customize noxfile.py -# ---------------------------------------------------------------------------- - -def place_before(path, text, *before_text, escape=None): - replacement = "\n".join(before_text) + "\n" + text - if escape: - for c in escape: - text = text.replace(c, '\\' + c) - s.replace([path], text, replacement) - -system_emulated_session = """ -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -def system_emulated(session): - import subprocess - import signal - - try: - subprocess.call(["gcloud", "--version"]) - except OSError: - session.skip("gcloud not found but required for emulator support") - - # Currently, CI/CD doesn't have beta component of gcloud. 
- subprocess.call(["gcloud", "components", "install", "beta", "bigtable"]) - - hostport = "localhost:8789" - session.env["BIGTABLE_EMULATOR_HOST"] = hostport - - p = subprocess.Popen( - ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport] - ) - - try: - system(session) - finally: - # Stop Emulator - os.killpg(os.getpgid(p.pid), signal.SIGKILL) - -""" - -place_before( - "noxfile.py", - "@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\n" - "def system(session):", - system_emulated_session, - escape="()" -) - -# add system_emulated nox session -s.replace("noxfile.py", - """nox.options.sessions = \[ - "unit", - "system",""", - """nox.options.sessions = [ - "unit", - "system_emulated", - "system", - "mypy",""", -) - - -s.replace( - "noxfile.py", - """\ -@nox.session\(python=DEFAULT_PYTHON_VERSION\) -def lint_setup_py\(session\): -""", - '''\ -@nox.session(python=DEFAULT_PYTHON_VERSION) -def mypy(session): - """Verify type hints are mypy compatible.""" - session.install("-e", ".") - session.install("mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests") - session.install("google-cloud-testutils") - # TODO: also verify types on tests, all of google package - session.run("mypy", "google/", "tests/") - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def lint_setup_py(session): -''', -) - -# ---------------------------------------------------------------------------- -# Samples templates -# ---------------------------------------------------------------------------- - -python.py_samples(skip_readmes=True) - -s.replace( - "samples/beam/noxfile.py", - """INSTALL_LIBRARY_FROM_SOURCE \= os.environ.get\("INSTALL_LIBRARY_FROM_SOURCE", False\) in \( - "True", - "true", -\)""", - """# todo(kolea2): temporary workaround to install pinned dep version -INSTALL_LIBRARY_FROM_SOURCE = False""") - -s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/release-please-config.json b/release-please-config.json deleted file mode 100644 index 33d5a7e21..000000000 --- a/release-please-config.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "$schema": -"https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", - "packages": { - ".": { - "release-type": "python", - "extra-files": [ - "google/cloud/bigtable/gapic_version.py", - "google/cloud/bigtable_admin/gapic_version.py", - "google/cloud/bigtable_v2/gapic_version.py", - "google/cloud/bigtable_admin_v2/gapic_version.py" - ] - } - }, - "release-type": "python", - "plugins": [ - { - "type": "sentence-case" - } - ], - "initial-version": "2.13.2" -} diff --git a/releases.md b/releases.md deleted file mode 120000 index 4c43d4932..000000000 --- a/releases.md +++ /dev/null @@ -1 +0,0 @@ -../../bigtable/CHANGELOG.md \ No newline at end of file diff --git a/renovate.json b/renovate.json index 39b2a0ec9..e2175ba2e 100644 --- a/renovate.json +++ b/renovate.json @@ -5,7 +5,7 @@ ":preserveSemverRanges", ":disableDependencyDashboard" ], - "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py"], + "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py", ".github/workflows/*"], "pip_requirements": { "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"] } diff --git a/samples/snippets/deletes/snapshots/__init__.py b/samples/__init__.py similarity index 100% rename from samples/snippets/deletes/snapshots/__init__.py rename to samples/__init__.py diff --git a/samples/beam/__init__.py 
b/samples/beam/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/beam/hello_world_write_test.py b/samples/beam/hello_world_write_test.py index 4e9a47c7d..ba0e98096 100644 --- a/samples/beam/hello_world_write_test.py +++ b/samples/beam/hello_world_write_test.py @@ -14,45 +14,33 @@ import os import uuid -from google.cloud import bigtable import pytest -import hello_world_write +from . import hello_world_write +from ..utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-beam-{str(uuid.uuid4())[:16]}" @pytest.fixture(scope="module", autouse=True) -def table_id(): - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) +def table(): + with create_table_cm( + PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None} + ) as table: + yield table - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - table.create(column_families={"stats_summary": None}) - yield table_id - - table.delete() - - -def test_hello_world_write(table_id): +def test_hello_world_write(table): hello_world_write.run( [ "--bigtable-project=%s" % PROJECT, "--bigtable-instance=%s" % BIGTABLE_INSTANCE, - "--bigtable-table=%s" % table_id, + "--bigtable-table=%s" % TABLE_ID, ] ) - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) - rows = table.read_rows() count = 0 for _ in rows: diff --git a/samples/beam/noxfile.py b/samples/beam/noxfile.py index 3d4395024..d0b343a91 100644 --- a/samples/beam/noxfile.py +++ b/samples/beam/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/beam/noxfile_config.py b/samples/beam/noxfile_config.py index eb01435a0..66d7bc5ac 100644 --- a/samples/beam/noxfile_config.py +++ b/samples/beam/noxfile_config.py @@ -23,8 +23,8 @@ TEST_CONFIG_OVERRIDE = { # You can opt out from the test for specific Python versions. 
"ignored_versions": [ - "2.7", # not supported - "3.10", # Beam wheels not yet released for Python 3.10 + "3.7", # Beam no longer supports Python 3.7 for new releases + "3.12", # Beam not yet supported for Python 3.12 ], # Old samples are opted out of enforcing Python type hints # All new samples should feature them diff --git a/samples/beam/requirements-test.txt b/samples/beam/requirements-test.txt index c4d04a08d..e079f8a60 100644 --- a/samples/beam/requirements-test.txt +++ b/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt index 8be9b98e0..e709a03cb 100644 --- a/samples/beam/requirements.txt +++ b/samples/beam/requirements.txt @@ -1,3 +1,5 @@ -apache-beam==2.46.0 -google-cloud-bigtable==2.17.0 -google-cloud-core==2.3.2 +apache-beam===2.60.0; python_version == '3.8' +apache-beam===2.69.0; python_version == '3.9' +apache-beam==2.71.0; python_version >= '3.10' +google-cloud-bigtable==2.35.0 +google-cloud-core==2.5.0 diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py new file mode 100644 index 000000000..82dafab44 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=app_profile, + ) + + # Make the request + response = await client.create_app_profile(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py new file mode 100644 index 000000000..82ff382b7 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=app_profile, + ) + + # Make the request + response = client.create_app_profile(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py new file mode 100644 index 000000000..fb9fac60f --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py new file mode 100644 index 000000000..d8d5f9958 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py new file mode 100644 index 000000000..dbde6c4bc --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + instance=instance, + ) + + # Make the request + operation = client.create_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py new file mode 100644 index 000000000..83ec90e53 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + instance=instance, + ) + + # Make the request + operation = client.create_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py new file mode 100644 index 000000000..6dfb1d612 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + logical_view=logical_view, + ) + + # Make the request + operation = client.create_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py new file mode 100644 index 000000000..f0214acbf --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + logical_view=logical_view, + ) + + # Make the request + operation = client.create_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py new file mode 100644 index 000000000..30481d2f3 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + materialized_view=materialized_view, + ) + + # Make the request + operation = client.create_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py new file mode 100644 index 000000000..45116fb49 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + materialized_view=materialized_view, + ) + + # Make the request + operation = client.create_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py new file mode 100644 index 000000000..76d272519 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAppProfileRequest( + name="name_value", + ignore_warnings=True, + ) + + # Make the request + await client.delete_app_profile(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py new file mode 100644 index 000000000..47f552fb8 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAppProfileRequest( + name="name_value", + ignore_warnings=True, + ) + + # Make the request + client.delete_app_profile(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py new file mode 100644 index 000000000..6f97b6a5e --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + await client.delete_cluster(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py new file mode 100644 index 000000000..d058a08e6 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + client.delete_cluster(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py new file mode 100644 index 000000000..ecf5583be --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteInstanceRequest( + name="name_value", + ) + + # Make the request + await client.delete_instance(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py new file mode 100644 index 000000000..e8f568486 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_instance():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.DeleteInstanceRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    client.delete_instance(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py
new file mode 100644
index 000000000..93f9d8ce8
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_logical_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.DeleteLogicalViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    await client.delete_logical_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py
new file mode 100644
index 000000000..fdece2bbc
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_logical_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.DeleteLogicalViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    client.delete_logical_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py
new file mode 100644
index 000000000..22a9f0ad4
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_delete_materialized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.DeleteMaterializedViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    await client.delete_materialized_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py
new file mode 100644
index 000000000..b6cf3a453
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for DeleteMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_delete_materialized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.DeleteMaterializedViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    client.delete_materialized_view(request=request)
+
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py
new file mode 100644
index 000000000..3a59ca599
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_app_profile():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetAppProfileRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_app_profile(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py
new file mode 100644
index 000000000..2e54bfcad
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_app_profile():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetAppProfileRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_app_profile(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py
new file mode 100644
index 000000000..b4d89a11d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_cluster():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetClusterRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_cluster(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py
new file mode 100644
index 000000000..25a80a871
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_cluster():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetClusterRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_cluster(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py
new file mode 100644
index 000000000..b2e479c11
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+
+async def sample_get_iam_policy():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = iam_policy_pb2.GetIamPolicyRequest(
+        resource="resource_value",
+    )
+
+    # Make the request
+    response = await client.get_iam_policy(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py
new file mode 100644
index 000000000..ffb2a81b0
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+
+def sample_get_iam_policy():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = iam_policy_pb2.GetIamPolicyRequest(
+        resource="resource_value",
+    )
+
+    # Make the request
+    response = client.get_iam_policy(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py
new file mode 100644
index 000000000..b76fac83a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_instance():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetInstanceRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_instance(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py
new file mode 100644
index 000000000..711ed99a5
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_instance():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetInstanceRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_instance(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py
new file mode 100644
index 000000000..4ce25cdda
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_logical_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetLogicalViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_logical_view(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py
new file mode 100644
index 000000000..daaf7fa63
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_logical_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetLogicalViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_logical_view(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py
new file mode 100644
index 000000000..165fb262c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_materialized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetMaterializedViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_materialized_view(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py
new file mode 100644
index 000000000..1f94e3954
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_materialized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetMaterializedViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_materialized_view(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py
new file mode 100644
index 000000000..d377fc678
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListAppProfiles
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_app_profiles():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListAppProfilesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request (the async paged call is a coroutine and must be awaited)
+    page_result = await client.list_app_profiles(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py
new file mode 100644
index 000000000..07f49ba39
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListAppProfiles
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_app_profiles():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListAppProfilesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_app_profiles(request=request)
+
+    # Handle the response
+    for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py
new file mode 100644
index 000000000..71532d98a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListClusters
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_clusters():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListClustersRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    response = await client.list_clusters(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py
new file mode 100644
index 000000000..1c36c098d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListClusters
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_clusters():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListClustersRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    response = client.list_clusters(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py
new file mode 100644
index 000000000..cb6d58847
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListHotTablets
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_hot_tablets():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListHotTabletsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request (the async paged call is a coroutine and must be awaited)
+    page_result = await client.list_hot_tablets(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py
new file mode 100644
index 000000000..5add7715d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListHotTablets
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_hot_tablets():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListHotTabletsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_hot_tablets(request=request)
+
+    # Handle the response
+    for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py
new file mode 100644
index 000000000..91c9a8230
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListInstances
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_instances():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListInstancesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    response = await client.list_instances(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py
new file mode 100644
index 000000000..bbe708c0e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListInstances
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_instances(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListInstancesRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_instances(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py new file mode 100644 index 000000000..8de9bd06e --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListLogicalViews +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
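+# - The paginated call below is awaited to obtain the async pager before it
+#   is iterated with `async for`.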
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_logical_views():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListLogicalViewsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_logical_views(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py
new file mode 100644
index 000000000..b5fb602cd
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListLogicalViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_logical_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListLogicalViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_logical_views(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py new file mode 100644 index 000000000..6fa672a25 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMaterializedViews +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
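+# - The returned pager fetches further result pages lazily, issuing
+#   additional ListMaterializedViews RPCs as the `async for` loop advances.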
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_materialized_views():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListMaterializedViewsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = await client.list_materialized_views(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py
new file mode 100644
index 000000000..5a25da88a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListMaterializedViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_materialized_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListMaterializedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_materialized_views(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py new file mode 100644 index 000000000..dab73b9cb --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PartialUpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
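+# - This RPC returns a long-running operation: the call is awaited to obtain
+#   the operation, and the operation's result() is awaited for the outcome.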
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_partial_update_cluster():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.PartialUpdateClusterRequest(
+    )
+
+    # Make the request
+    operation = await client.partial_update_cluster(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py
new file mode 100644
index 000000000..bab63c6ed
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for PartialUpdateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_partial_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.PartialUpdateClusterRequest( + ) + + # Make the request + operation = client.partial_update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py new file mode 100644 index 000000000..4c5e53ebe --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PartialUpdateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
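+# - A real request should also set `update_mask` to name the fields being
+#   changed, e.g. (illustrative):
+#       from google.protobuf import field_mask_pb2
+#       request.update_mask = field_mask_pb2.FieldMask(paths=["display_name"])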
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_partial_update_instance():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    instance = bigtable_admin_v2.Instance()
+    instance.display_name = "display_name_value"
+
+    request = bigtable_admin_v2.PartialUpdateInstanceRequest(
+        instance=instance,
+    )
+
+    # Make the request
+    operation = await client.partial_update_instance(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py
new file mode 100644
index 000000000..0d2a74cfc
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for PartialUpdateInstance
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_partial_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.PartialUpdateInstanceRequest( + instance=instance, + ) + + # Make the request + operation = client.partial_update_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py new file mode 100644 index 000000000..b389b7679 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
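+# - A real request must also carry the policy to set, e.g. (illustrative):
+#       request.policy.bindings.add(
+#           role="roles/bigtable.user",
+#           members=["user:alice@example.com"],
+#       )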
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py new file mode 100644 index 000000000..97bc29d65 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py new file mode 100644 index 000000000..977f79d9b --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
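+# - Permissions are IAM permission strings such as `bigtable.tables.get`;
+#   the response lists the subset of them that the caller actually holds.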
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py new file mode 100644 index 000000000..db047d367 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py new file mode 100644 index 000000000..2c55a45bd --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
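+# - `priority` below is assigned by enum name; the enum value itself
+#   (e.g. bigtable_admin_v2.AppProfile.Priority.PRIORITY_HIGH) also works.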
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_app_profile():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    app_profile = bigtable_admin_v2.AppProfile()
+    app_profile.priority = "PRIORITY_HIGH"
+
+    request = bigtable_admin_v2.UpdateAppProfileRequest(
+        app_profile=app_profile,
+    )
+
+    # Make the request
+    operation = await client.update_app_profile(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py
new file mode 100644
index 000000000..a7b683426
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateAppProfile
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.UpdateAppProfileRequest( + app_profile=app_profile, + ) + + # Make the request + operation = client.update_app_profile(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py new file mode 100644 index 000000000..af3abde41 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
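+# - Unlike most RPCs in this service, UpdateCluster takes the Cluster
+#   resource itself as the request message, so fields such as `name` and
+#   `serve_nodes` are set directly on the request.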
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_cluster():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.Cluster(
+    )
+
+    # Make the request
+    operation = await client.update_cluster(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py
new file mode 100644
index 000000000..ec02a64af
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateCluster
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Cluster( + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py new file mode 100644 index 000000000..798afaf80 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
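+# - UpdateInstance likewise takes the Instance resource as the request
+#   message and returns the updated Instance directly, with no long-running
+#   operation to wait on.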
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Instance( + display_name="display_name_value", + ) + + # Make the request + response = await client.update_instance(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py new file mode 100644 index 000000000..fb6e5e2d3 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Instance( + display_name="display_name_value", + ) + + # Make the request + response = client.update_instance(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py new file mode 100644 index 000000000..9bdd620e6 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
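+# - `logical_view.query` (set below) is the query that defines the view
+#   and is required.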
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_logical_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    logical_view = bigtable_admin_v2.LogicalView()
+    logical_view.query = "query_value"
+
+    request = bigtable_admin_v2.UpdateLogicalViewRequest(
+        logical_view=logical_view,
+    )
+
+    # Make the request
+    operation = await client.update_logical_view(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py
new file mode 100644
index 000000000..10d962205
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateLogicalView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.UpdateLogicalViewRequest( + logical_view=logical_view, + ) + + # Make the request + operation = client.update_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py new file mode 100644 index 000000000..ddd930475 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
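+# - On success, the operation's result() resolves to the updated
+#   MaterializedView resource.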
+# - It may require specifying regional endpoints when creating the service
+# client as shown in:
+# https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_update_materialized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
+
+    # Initialize request argument(s)
+    materialized_view = bigtable_admin_v2.MaterializedView()
+    materialized_view.query = "query_value"
+
+    request = bigtable_admin_v2.UpdateMaterializedViewRequest(
+        materialized_view=materialized_view,
+    )
+
+    # Make the request
+    operation = await client.update_materialized_view(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py
new file mode 100644
index 000000000..a2ef78bd3
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateMaterializedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+# python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.UpdateMaterializedViewRequest( + materialized_view=materialized_view, + ) + + # Make the request + operation = client.update_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py new file mode 100644 index 000000000..4cd57edc8 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckConsistency +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
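+# - `consistency_token` comes from a prior GenerateConsistencyToken call
+#   on the same table.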
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_check_consistency(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Make the request + response = await client.check_consistency(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py new file mode 100644 index 000000000..ec6085b3f --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckConsistency +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_check_consistency(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Make the request + response = client.check_consistency(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py new file mode 100644 index 000000000..9355b7d44 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CopyBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
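+#   For CopyBackup, for example, `parent` is the destination cluster
+#   ("projects/{project}/instances/{instance}/clusters/{cluster}") and
+#   `source_backup` is the full resource name of an existing backup; a real
+#   request is also expected to set an `expire_time` for the new backup.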
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_copy_backup():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.CopyBackupRequest(
+        parent="parent_value",
+        backup_id="backup_id_value",
+        source_backup="source_backup_value",
+    )
+
+    # Make the request
+    operation = await client.copy_backup(request=request)
+
+    print("Waiting for operation to complete...")
+
+    # AsyncOperation.result() is itself a coroutine and must be awaited
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py
new file mode 100644
index 000000000..25456ad21
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CopyBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_copy_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + # Make the request + operation = client.copy_backup(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py new file mode 100644 index 000000000..135bbe220 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
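+#   For CreateAuthorizedView, for example, `parent` is an existing table
+#   ("projects/{project}/instances/{instance}/tables/{table}"), and a real
+#   request typically also carries an `authorized_view` payload describing
+#   the subset of the table to expose.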
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_authorized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.CreateAuthorizedViewRequest(
+        parent="parent_value",
+        authorized_view_id="authorized_view_id_value",
+    )
+
+    # Make the request
+    operation = await client.create_authorized_view(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py
new file mode 100644
index 000000000..cafbf56cb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + # Make the request + operation = client.create_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py new file mode 100644 index 000000000..d9bd402b4 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
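+#   For CreateBackup, for example, `parent` is the cluster that will own
+#   the backup ("projects/{project}/instances/{instance}/clusters/{cluster}"),
+#   and a valid `backup` must also set `expire_time` in addition to the
+#   `source_table` shown below.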
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_backup():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    backup = bigtable_admin_v2.Backup()
+    backup.source_table = "source_table_value"
+
+    request = bigtable_admin_v2.CreateBackupRequest(
+        parent="parent_value",
+        backup_id="backup_id_value",
+        backup=backup,
+    )
+
+    # Make the request
+    operation = await client.create_backup(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py
new file mode 100644
index 000000000..835f0573c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + backup=backup, + ) + + # Make the request + operation = client.create_backup(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py new file mode 100644 index 000000000..8e4992635 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
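+#   For CreateSchemaBundle, for example, `parent` is an existing table
+#   ("projects/{project}/instances/{instance}/tables/{table}"), and
+#   `proto_descriptors` is expected to be a serialized
+#   google.protobuf.FileDescriptorSet rather than an arbitrary blob.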
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_schema_bundle():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    schema_bundle = bigtable_admin_v2.SchemaBundle()
+    schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+    request = bigtable_admin_v2.CreateSchemaBundleRequest(
+        parent="parent_value",
+        schema_bundle_id="schema_bundle_id_value",
+        schema_bundle=schema_bundle,
+    )
+
+    # Make the request
+    operation = await client.create_schema_bundle(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py
new file mode 100644
index 000000000..a5911497d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.create_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py new file mode 100644 index 000000000..3096539b9 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
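+#   For CreateTable, for example, `parent` is the owning instance
+#   ("projects/{project}/instances/{instance}") and `table_id` becomes the
+#   permanent identifier of the new table within that instance.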
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + response = await client.create_table(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py new file mode 100644 index 000000000..f7767438e --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTableFromSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
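+#   For CreateTableFromSnapshot, for example, `source_snapshot` is a full
+#   snapshot resource name of the form
+#   "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".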
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_create_table_from_snapshot():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.CreateTableFromSnapshotRequest(
+        parent="parent_value",
+        table_id="table_id_value",
+        source_snapshot="source_snapshot_value",
+    )
+
+    # Make the request
+    operation = await client.create_table_from_snapshot(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py
new file mode 100644
index 000000000..ff1dd7899
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for CreateTableFromSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.

+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_table_from_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Make the request + operation = client.create_table_from_snapshot(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py new file mode 100644 index 000000000..552a1095f --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + response = client.create_table(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py new file mode 100644 index 000000000..cbee06ae1 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
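+#   For DeleteAuthorizedView, for example, `name` takes the form
+#   "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".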
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_authorized_view(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py new file mode 100644 index 000000000..298e66efb --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + client.delete_authorized_view(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py new file mode 100644 index 000000000..d2615f792 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteBackupRequest( + name="name_value", + ) + + # Make the request + await client.delete_backup(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py new file mode 100644 index 000000000..c9888bf39 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
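+#   For DeleteBackup, for example, `name` is a full backup resource name of
+#   the form
+#   "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".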
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteBackupRequest( + name="name_value", + ) + + # Make the request + client.delete_backup(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py new file mode 100644 index 000000000..7377299d1 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSchemaBundleRequest( + name="name_value", + ) + + # Make the request + await client.delete_schema_bundle(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py new file mode 100644 index 000000000..5dc12b464 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSchemaBundleRequest( + name="name_value", + ) + + # Make the request + client.delete_schema_bundle(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py new file mode 100644 index 000000000..eb8ca8166 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
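+#   For DeleteSnapshot, for example, `name` takes the form
+#   "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".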
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSnapshotRequest( + name="name_value", + ) + + # Make the request + await client.delete_snapshot(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py new file mode 100644 index 000000000..ad979615d --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSnapshotRequest( + name="name_value", + ) + + # Make the request + client.delete_snapshot(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py new file mode 100644 index 000000000..375e61557 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteTableRequest( + name="name_value", + ) + + # Make the request + await client.delete_table(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py new file mode 100644 index 000000000..17397bfab --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
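+#   For DeleteTable, for example, `name` is the full table resource name
+#   "projects/{project}/instances/{instance}/tables/{table}"; deleting a
+#   table is permanent, so double-check the name before running the sample.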
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteTableRequest( + name="name_value", + ) + + # Make the request + client.delete_table(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py new file mode 100644 index 000000000..391205c7c --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DropRowRange +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_drop_row_range(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DropRowRangeRequest( + row_key_prefix=b'row_key_prefix_blob', + name="name_value", + ) + + # Make the request + await client.drop_row_range(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py new file mode 100644 index 000000000..bcd528f1a --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DropRowRange +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_drop_row_range(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DropRowRangeRequest( + row_key_prefix=b'row_key_prefix_blob', + name="name_value", + ) + + # Make the request + client.drop_row_range(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py new file mode 100644 index 000000000..1953441b6 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateConsistencyToken +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
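+#   For GenerateConsistencyToken, `name` is a full table resource name. A
+#   typical (hypothetical) flow pairs it with CheckConsistency, polling
+#   until replication has caught up; sketch, assuming `table_name` holds a
+#   real table resource name:
+#     token = (await client.generate_consistency_token(
+#         request={"name": table_name})).consistency_token
+#     while not (await client.check_consistency(request={
+#             "name": table_name,
+#             "consistency_token": token})).consistent:
+#         await asyncio.sleep(1)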
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_generate_consistency_token():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GenerateConsistencyTokenRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.generate_consistency_token(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py
new file mode 100644
index 000000000..4ae52264d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GenerateConsistencyToken
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_generate_consistency_token():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GenerateConsistencyTokenRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.generate_consistency_token(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py
new file mode 100644
index 000000000..129948bc5
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_authorized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetAuthorizedViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_authorized_view(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py
new file mode 100644
index 000000000..9cc63538c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_authorized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetAuthorizedViewRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_authorized_view(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py
new file mode 100644
index 000000000..524d63e86
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_backup():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetBackupRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_backup(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py
new file mode 100644
index 000000000..5ed91b80c
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetBackup
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_backup():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetBackupRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_backup(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py
new file mode 100644
index 000000000..a599239d5
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+
+async def sample_get_iam_policy():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = iam_policy_pb2.GetIamPolicyRequest(
+        resource="resource_value",
+    )
+
+    # Make the request
+    response = await client.get_iam_policy(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py
new file mode 100644
index 000000000..2d6e71c27
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetIamPolicy
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+
+
+def sample_get_iam_policy():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = iam_policy_pb2.GetIamPolicyRequest(
+        resource="resource_value",
+    )
+
+    # Make the request
+    response = client.get_iam_policy(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py
new file mode 100644
index 000000000..b5e580276
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_schema_bundle():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetSchemaBundleRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_schema_bundle(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py
new file mode 100644
index 000000000..1ea7b69b7
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_schema_bundle():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetSchemaBundleRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_schema_bundle(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py
new file mode 100644
index 000000000..ae48060bb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_snapshot():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetSnapshotRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_snapshot(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py
new file mode 100644
index 000000000..8626549fd
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetSnapshot
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_snapshot():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetSnapshotRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_snapshot(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py
new file mode 100644
index 000000000..ff8dff1ae
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_get_table():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetTableRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.get_table(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py
new file mode 100644
index 000000000..ccb68b766
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for GetTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_get_table():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.GetTableRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = client.get_table(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py
new file mode 100644
index 000000000..658b8f96a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListAuthorizedViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_authorized_views():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListAuthorizedViewsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_authorized_views(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py
new file mode 100644
index 000000000..a7bf4b6ad
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListAuthorizedViews
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_authorized_views():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListAuthorizedViewsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_authorized_views(request=request)
+
+    # Handle the response
+    for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py
new file mode 100644
index 000000000..368c376f0
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListBackups
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_backups():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListBackupsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_backups(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py
new file mode 100644
index 000000000..ca0e3e0f2
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListBackups
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_backups():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListBackupsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_backups(request=request)
+
+    # Handle the response
+    for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py
new file mode 100644
index 000000000..3daf30e6d
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListSchemaBundles
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_schema_bundles():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListSchemaBundlesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_schema_bundles(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py
new file mode 100644
index 000000000..945d606bb
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListSchemaBundles
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_schema_bundles():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListSchemaBundlesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_schema_bundles(request=request)
+
+    # Handle the response
+    for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py
new file mode 100644
index 000000000..91acb1d9e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListSnapshots
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_snapshots():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListSnapshotsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_snapshots(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py
new file mode 100644
index 000000000..7f809156f
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListSnapshots
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_snapshots():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListSnapshotsRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_snapshots(request=request)
+
+    # Handle the response
+    for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py
new file mode 100644
index 000000000..191de0fc7
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTables
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_list_tables():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListTablesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_tables(request=request)
+
+    # Handle the response
+    async for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py
new file mode 100644
index 000000000..5d0f3a278
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ListTables
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+def sample_list_tables():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ListTablesRequest(
+        parent="parent_value",
+    )
+
+    # Make the request
+    page_result = client.list_tables(request=request)
+
+    # Handle the response
+    for response in page_result:
+        print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py
new file mode 100644
index 000000000..2c206eb44
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ModifyColumnFamilies
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2
+
+
+async def sample_modify_column_families():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.ModifyColumnFamiliesRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    response = await client.modify_column_families(request=request)
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py
new file mode 100644
index 000000000..6224f5c5e
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for ModifyColumnFamilies
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_modify_column_families(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ModifyColumnFamiliesRequest( + name="name_value", + ) + + # Make the request + response = client.modify_column_families(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py new file mode 100644 index 000000000..f70b5da17 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RestoreTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2


+async def sample_restore_table():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.RestoreTableRequest(
+        backup="backup_value",
+        parent="parent_value",
+        table_id="table_id_value",
+    )
+
+    # Make the request
+    operation = await client._restore_table(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py
new file mode 100644
index 000000000..45621c22b
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for RestoreTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_restore_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.RestoreTableRequest( + backup="backup_value", + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + operation = client._restore_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py new file mode 100644 index 000000000..cbfafdc77 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py new file mode 100644 index 000000000..9a6c5fcc2 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py new file mode 100644 index 000000000..6ff619e85 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SnapshotTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2


+async def sample_snapshot_table():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.SnapshotTableRequest(
+        name="name_value",
+        cluster="cluster_value",
+        snapshot_id="snapshot_id_value",
+    )
+
+    # Make the request
+    operation = await client.snapshot_table(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py
new file mode 100644
index 000000000..f983f7824
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for SnapshotTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_snapshot_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + ) + + # Make the request + operation = client.snapshot_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py new file mode 100644 index 000000000..ee5fe6027 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py new file mode 100644 index 000000000..46f0870b0 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py new file mode 100644 index 000000000..1e2f6aa5a --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeleteTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2


+async def sample_undelete_table():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.UndeleteTableRequest(
+        name="name_value",
+    )
+
+    # Make the request
+    operation = await client.undelete_table(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py
new file mode 100644
index 000000000..637afee8b
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UndeleteTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_undelete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UndeleteTableRequest( + name="name_value", + ) + + # Make the request + operation = client.undelete_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py new file mode 100644 index 000000000..541427d48 --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2


+async def sample_update_authorized_view():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.UpdateAuthorizedViewRequest(
+    )
+
+    # Make the request
+    operation = await client.update_authorized_view(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py
new file mode 100644
index 000000000..9c8198d9a
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateAuthorizedView
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateAuthorizedViewRequest( + ) + + # Make the request + operation = client.update_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py new file mode 100644 index 000000000..f98e1e33a --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.UpdateBackupRequest( + backup=backup, + ) + + # Make the request + response = await client.update_backup(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py new file mode 100644 index 000000000..466a3decb --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.UpdateBackupRequest( + backup=backup, + ) + + # Make the request + response = client.update_backup(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py new file mode 100644 index 000000000..96447088e --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2


+async def sample_update_schema_bundle():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    schema_bundle = bigtable_admin_v2.SchemaBundle()
+    schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob'
+
+    request = bigtable_admin_v2.UpdateSchemaBundleRequest(
+        schema_bundle=schema_bundle,
+    )
+
+    # Make the request
+    operation = await client.update_schema_bundle(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py
new file mode 100644
index 000000000..075683060
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateSchemaBundle
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.UpdateSchemaBundleRequest( + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.update_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync] diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py new file mode 100644 index 000000000..93839d36f --- /dev/null +++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service
+#   client as shown in:
+#   https://googleapis.dev/python/google-api-core/latest/client_options.html
+from google.cloud import bigtable_admin_v2


+async def sample_update_table():
+    # Create a client
+    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+
+    # Initialize request argument(s)
+    request = bigtable_admin_v2.UpdateTableRequest(
+    )
+
+    # Make the request
+    operation = await client.update_table(request=request)
+
+    print("Waiting for operation to complete...")
+
+    response = await operation.result()
+
+    # Handle the response
+    print(response)
+
+# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async]
diff --git a/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py
new file mode 100644
index 000000000..fea09f6a8
--- /dev/null
+++ b/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated code. DO NOT EDIT!
+#
+# Snippet for UpdateTable
+# NOTE: This snippet has been automatically generated for illustrative purposes only.
+# It may require modifications to work in your environment.
+
+# To install the latest published package dependency, execute the following:
+#   python3 -m pip install google-cloud-bigtable-admin
+
+
+# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync]
+# This snippet has been automatically generated and should be regarded as a
+# code template only.
+# It will require modifications to work:
+# - It may require correct/in-range values for request initialization.
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateTableRequest( + ) + + # Make the request + operation = client.update_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync] diff --git a/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json b/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json new file mode 100644 index 000000000..42db3b70b --- /dev/null +++ b/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json @@ -0,0 +1,10871 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.bigtable.admin.v2", + "version": "v2" + } + ], + "language": "PYTHON", + "name": "google-cloud-bigtable-admin", + "version": "2.35.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "app_profile_id", + "type": "str" + }, + { + "name": "app_profile", + "type": "google.cloud.bigtable_admin_v2.types.AppProfile" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", + "shortName": "create_app_profile" + }, + "description": "Sample for CreateAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_app_profile", + "method": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "app_profile_id", + "type": "str" + }, + { + "name": "app_profile", + "type": "google.cloud.bigtable_admin_v2.types.AppProfile" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", + "shortName": "create_app_profile" + }, + "description": "Sample for CreateAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateClusterRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "cluster_id", + "type": "str" + }, + { + "name": "cluster", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_cluster" + }, + "description": "Sample for CreateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateClusterRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "cluster_id", + "type": "str" + }, + { + "name": "cluster", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_cluster" + }, + "description": "Sample for CreateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateInstanceRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "instance_id", + "type": "str" + }, + { + "name": "instance", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "clusters", + "type": "MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_instance" + }, + "description": "Sample for CreateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async", + "segments": [ + { + 
"end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateInstanceRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "instance_id", + "type": "str" + }, + { + "name": "instance", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "clusters", + "type": "MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_instance" + }, + "description": "Sample for CreateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "logical_view", + "type": "google.cloud.bigtable_admin_v2.types.LogicalView" + }, + { + "name": "logical_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, 
Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_logical_view" + }, + "description": "Sample for CreateLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "logical_view", + "type": "google.cloud.bigtable_admin_v2.types.LogicalView" + }, + { + "name": "logical_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_logical_view" + }, + "description": "Sample for CreateLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "materialized_view", + "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" + }, + { + "name": "materialized_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_materialized_view" + }, + "description": "Sample for CreateMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "materialized_view", + "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" + }, + { + "name": "materialized_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_materialized_view" + }, + "description": "Sample for CreateMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "ignore_warnings", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_app_profile" + }, + "description": "Sample for DeleteAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "ignore_warnings", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_app_profile" + }, + "description": "Sample for DeleteAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_cluster" + }, + "description": "Sample for DeleteCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_cluster" + }, + "description": "Sample for DeleteCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_instance" + }, + "description": "Sample for DeleteInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_instance" + }, + "description": "Sample for DeleteInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_logical_view", + "method": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_logical_view" + }, + "description": "Sample for DeleteLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_logical_view" + }, + "description": "Sample for DeleteLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteMaterializedView", + "service": { + 
"fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_materialized_view" + }, + "description": "Sample for DeleteMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_materialized_view" + }, + "description": "Sample for DeleteMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetAppProfileRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", + "shortName": "get_app_profile" + }, + "description": "Sample for GetAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetAppProfileRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", + "shortName": "get_app_profile" + }, + "description": "Sample for GetAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", + "service": { + 
"fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Cluster", + "shortName": "get_cluster" + }, + "description": "Sample for GetCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Cluster", + "shortName": "get_cluster" + }, + "description": "Sample for GetCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetInstanceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Instance", + "shortName": "get_instance" + }, + "description": "Sample for GetInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetInstanceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Instance", + "shortName": "get_instance" + }, + "description": "Sample for GetInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetLogicalView", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.LogicalView", + "shortName": "get_logical_view" + }, + "description": "Sample for GetLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.LogicalView", + "shortName": "get_logical_view" + }, + "description": "Sample for GetLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_materialized_view", + "method": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.GetMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.MaterializedView", + "shortName": "get_materialized_view" + }, + "description": "Sample for GetMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.MaterializedView", + "shortName": "get_materialized_view" + }, + "description": "Sample for GetMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": 
"BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_app_profiles", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListAppProfiles" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager", + "shortName": "list_app_profiles" + }, + "description": "Sample for ListAppProfiles", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_app_profiles", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListAppProfiles" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager", + "shortName": "list_app_profiles" + }, + "description": "Sample for ListAppProfiles", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py" + }, + { + "canonical": 
true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_clusters", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.ListClustersResponse", + "shortName": "list_clusters" + }, + "description": "Sample for ListClusters", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_clusters", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.ListClustersResponse", + "shortName": "list_clusters" + }, + "description": "Sample for ListClusters", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py" + }, + 
{ + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_hot_tablets", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListHotTablets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager", + "shortName": "list_hot_tablets" + }, + "description": "Sample for ListHotTablets", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_hot_tablets", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListHotTablets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager", + "shortName": "list_hot_tablets" + }, + "description": "Sample for ListHotTablets", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_instances", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListInstances" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListInstancesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.ListInstancesResponse", + "shortName": "list_instances" + }, + "description": "Sample for ListInstances", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_instances", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListInstances" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListInstancesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.ListInstancesResponse", + "shortName": "list_instances" + }, + "description": "Sample for ListInstances", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" 
+ }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_logical_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListLogicalViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListLogicalViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsAsyncPager", + "shortName": "list_logical_views" + }, + "description": "Sample for ListLogicalViews", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_logical_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListLogicalViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListLogicalViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsPager", + "shortName": "list_logical_views" + }, + "description": "Sample for ListLogicalViews", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_materialized_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListMaterializedViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListMaterializedViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsAsyncPager", + "shortName": "list_materialized_views" + }, + "description": "Sample for ListMaterializedViews", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_materialized_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListMaterializedViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListMaterializedViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsPager", + "shortName": "list_materialized_views" + }, + "description": "Sample for ListMaterializedViews", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.partial_update_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "PartialUpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest" + }, + { + "name": "cluster", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "partial_update_cluster" + }, + "description": "Sample for PartialUpdateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.partial_update_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "PartialUpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest" + }, + { + "name": "cluster", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "partial_update_cluster" + }, + "description": "Sample for PartialUpdateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.partial_update_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "PartialUpdateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest" + }, + { + "name": "instance", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "partial_update_instance" + }, + "description": "Sample for PartialUpdateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.partial_update_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": 
"PartialUpdateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest" + }, + { + "name": "instance", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "partial_update_instance" + }, + "description": "Sample for PartialUpdateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.set_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.set_iam_policy", + "method": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.test_iam_permissions", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.test_iam_permissions", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest" + }, + { + "name": "app_profile", + "type": "google.cloud.bigtable_admin_v2.types.AppProfile" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_app_profile" + }, + "description": "Sample for UpdateAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + 
], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest" + }, + { + "name": "app_profile", + "type": "google.cloud.bigtable_admin_v2.types.AppProfile" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_app_profile" + }, + "description": "Sample for UpdateAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_cluster" + }, + "description": "Sample for UpdateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + 
"start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_cluster" + }, + "description": "Sample for UpdateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Instance", + "shortName": "update_instance" + }, + "description": "Sample for UpdateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Instance", + "shortName": "update_instance" + }, + "description": "Sample for UpdateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest" + }, + { + "name": "logical_view", + "type": "google.cloud.bigtable_admin_v2.types.LogicalView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_logical_view" + }, + "description": "Sample for UpdateLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest" + }, + { + "name": "logical_view", + "type": "google.cloud.bigtable_admin_v2.types.LogicalView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_logical_view" + }, + "description": "Sample for UpdateLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest" + }, + { + "name": "materialized_view", + "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_materialized_view" + }, + "description": "Sample for UpdateMaterializedView", + "file": 
"bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest" + }, + { + "name": "materialized_view", + "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_materialized_view" + }, + "description": "Sample for UpdateMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.check_consistency", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CheckConsistency" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "consistency_token", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse", + "shortName": "check_consistency" + }, + "description": "Sample for CheckConsistency", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.check_consistency", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CheckConsistency" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "consistency_token", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse", + "shortName": "check_consistency" + }, + "description": "Sample for CheckConsistency", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.copy_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CopyBackup" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.bigtable_admin_v2.types.CopyBackupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "backup_id", + "type": "str" + }, + { + "name": "source_backup", + "type": "str" + }, + { + "name": "expire_time", + "type": "google.protobuf.timestamp_pb2.Timestamp" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "copy_backup" + }, + "description": "Sample for CopyBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.copy_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CopyBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CopyBackupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "backup_id", + "type": "str" + }, + { + "name": "source_backup", + "type": "str" + }, + { + "name": "expire_time", + "type": "google.protobuf.timestamp_pb2.Timestamp" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "copy_backup" + }, + "description": "Sample for CopyBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "authorized_view", + "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" + }, + { + "name": "authorized_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_authorized_view" + }, + "description": "Sample for CreateAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "authorized_view", + "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" + }, + { + "name": "authorized_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_authorized_view" + }, + "description": "Sample for CreateAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + 
"type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateBackupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "backup_id", + "type": "str" + }, + { + "name": "backup", + "type": "google.cloud.bigtable_admin_v2.types.Backup" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_backup" + }, + "description": "Sample for CreateBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateBackupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "backup_id", + "type": "str" + }, + { + "name": "backup", + "type": "google.cloud.bigtable_admin_v2.types.Backup" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_backup" + }, + "description": "Sample for CreateBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { 
+ "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "schema_bundle_id", + "type": "str" + }, + { + "name": "schema_bundle", + "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_schema_bundle" + }, + "description": "Sample for CreateSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "schema_bundle_id", + "type": "str" + }, + { + "name": "schema_bundle", + "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": 
"create_schema_bundle" + }, + "description": "Sample for CreateSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_table_from_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateTableFromSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "table_id", + "type": "str" + }, + { + "name": "source_snapshot", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_table_from_snapshot" + }, + "description": "Sample for CreateTableFromSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_table_from_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateTableFromSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "table_id", + 
"type": "str" + }, + { + "name": "source_snapshot", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_table_from_snapshot" + }, + "description": "Sample for CreateTableFromSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateTableRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "table_id", + "type": "str" + }, + { + "name": "table", + "type": "google.cloud.bigtable_admin_v2.types.Table" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "create_table" + }, + "description": "Sample for CreateTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" 
+ }, + "shortName": "CreateTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateTableRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "table_id", + "type": "str" + }, + { + "name": "table", + "type": "google.cloud.bigtable_admin_v2.types.Table" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "create_table" + }, + "description": "Sample for CreateTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_authorized_view" + }, + "description": "Sample for DeleteAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_authorized_view" + }, + "description": "Sample for DeleteAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteBackupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_backup" + }, + "description": "Sample for DeleteBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteBackup" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.bigtable_admin_v2.types.DeleteBackupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_backup" + }, + "description": "Sample for DeleteBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_schema_bundle" + }, + "description": "Sample for DeleteSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + 
"type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_schema_bundle" + }, + "description": "Sample for DeleteSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_snapshot" + }, + "description": "Sample for DeleteSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" 
+ } + ], + "shortName": "delete_snapshot" + }, + "description": "Sample for DeleteSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_table" + }, + "description": "Sample for DeleteTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_table" + }, + "description": "Sample for DeleteTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.drop_row_range", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DropRowRange" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DropRowRangeRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "drop_row_range" + }, + "description": "Sample for DropRowRange", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.drop_row_range", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DropRowRange" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DropRowRangeRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "drop_row_range" + }, + "description": "Sample for DropRowRange", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, 
+ "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.generate_consistency_token", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GenerateConsistencyToken" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse", + "shortName": "generate_consistency_token" + }, + "description": "Sample for GenerateConsistencyToken", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.generate_consistency_token", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GenerateConsistencyToken" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse", + "shortName": "generate_consistency_token" + }, + "description": "Sample for GenerateConsistencyToken", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync", + "segments": [ + { + "end": 51, + 
"start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AuthorizedView", + "shortName": "get_authorized_view" + }, + "description": "Sample for GetAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AuthorizedView", + "shortName": "get_authorized_view" + }, + "description": "Sample for GetAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetBackupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Backup", + "shortName": "get_backup" + }, + "description": "Sample for GetBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetBackupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Backup", + "shortName": "get_backup" + }, + "description": "Sample for GetBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync", + "segments": [ + 
{ + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + 
"start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.SchemaBundle", + "shortName": "get_schema_bundle" + }, + "description": "Sample for GetSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.SchemaBundle", + "shortName": "get_schema_bundle" + }, + "description": "Sample for GetSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": 
"FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetSnapshotRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Snapshot", + "shortName": "get_snapshot" + }, + "description": "Sample for GetSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetSnapshotRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Snapshot", + "shortName": "get_snapshot" + }, + "description": "Sample for GetSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": 
"SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "get_table" + }, + "description": "Sample for GetTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "get_table" + }, + "description": "Sample for GetTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, 
+ "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_authorized_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListAuthorizedViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager", + "shortName": "list_authorized_views" + }, + "description": "Sample for ListAuthorizedViews", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_authorized_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListAuthorizedViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager", + "shortName": "list_authorized_views" + }, + "description": "Sample for ListAuthorizedViews", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync", + "segments": [ + { + "end": 52, + "start": 
27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_backups", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListBackups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListBackupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager", + "shortName": "list_backups" + }, + "description": "Sample for ListBackups", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_backups", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListBackups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListBackupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager", + "shortName": "list_backups" + }, + "description": "Sample for ListBackups", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync", + 
"segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_schema_bundles", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListSchemaBundles" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesAsyncPager", + "shortName": "list_schema_bundles" + }, + "description": "Sample for ListSchemaBundles", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_schema_bundles", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListSchemaBundles" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesPager", + "shortName": "list_schema_bundles" + }, + "description": "Sample for ListSchemaBundles", + "file": 
"bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_snapshots", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListSnapshots" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager", + "shortName": "list_snapshots" + }, + "description": "Sample for ListSnapshots", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_snapshots", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListSnapshots" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager", + 
"shortName": "list_snapshots" + }, + "description": "Sample for ListSnapshots", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_tables", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListTables", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListTables" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListTablesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager", + "shortName": "list_tables" + }, + "description": "Sample for ListTables", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_tables", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListTables", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListTables" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListTablesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": 
"google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager", + "shortName": "list_tables" + }, + "description": "Sample for ListTables", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.modify_column_families", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ModifyColumnFamilies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "modifications", + "type": "MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "modify_column_families" + }, + "description": "Sample for ModifyColumnFamilies", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.modify_column_families", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ModifyColumnFamilies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest" + }, + { + "name": "name", + 
"type": "str" + }, + { + "name": "modifications", + "type": "MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "modify_column_families" + }, + "description": "Sample for ModifyColumnFamilies", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._restore_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "RestoreTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.RestoreTableRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "_restore_table" + }, + "description": "Sample for RestoreTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._restore_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "RestoreTable" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.RestoreTableRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "_restore_table" + }, + "description": "Sample for RestoreTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.set_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.set_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.snapshot_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "SnapshotTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.SnapshotTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "cluster", + "type": "str" + }, + { + "name": "snapshot_id", + "type": "str" + }, + { + "name": "description", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "snapshot_table" + }, + "description": "Sample for SnapshotTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.snapshot_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": 
"BigtableTableAdmin" + }, + "shortName": "SnapshotTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.SnapshotTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "cluster", + "type": "str" + }, + { + "name": "snapshot_id", + "type": "str" + }, + { + "name": "description", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "snapshot_table" + }, + "description": "Sample for SnapshotTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.test_iam_permissions", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.test_iam_permissions", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.undelete_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UndeleteTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UndeleteTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undelete_table" + }, + "description": "Sample for UndeleteTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.undelete_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UndeleteTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UndeleteTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undelete_table" + }, + "description": "Sample for UndeleteTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest" + }, + { + "name": "authorized_view", + "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_authorized_view" + }, + "description": "Sample for UpdateAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" 
+ } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest" + }, + { + "name": "authorized_view", + "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_authorized_view" + }, + "description": "Sample for UpdateAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateBackupRequest" + }, + { + "name": "backup", + "type": "google.cloud.bigtable_admin_v2.types.Backup" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Backup", + "shortName": "update_backup" + }, + "description": "Sample for UpdateBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + 
"start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateBackupRequest" + }, + { + "name": "backup", + "type": "google.cloud.bigtable_admin_v2.types.Backup" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Backup", + "shortName": "update_backup" + }, + "description": "Sample for UpdateBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest" + }, + { + "name": "schema_bundle", + "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_schema_bundle" + }, + "description": "Sample for UpdateSchemaBundle", + "file": 
"bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest" + }, + { + "name": "schema_bundle", + "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_schema_bundle" + }, + "description": "Sample for UpdateSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateTableRequest" + }, + { + "name": "table", + "type": "google.cloud.bigtable_admin_v2.types.Table" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + 
}, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_table" + }, + "description": "Sample for UpdateTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateTableRequest" + }, + { + "name": "table", + "type": "google.cloud.bigtable_admin_v2.types.Table" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_table" + }, + "description": "Sample for UpdateTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py" + } + ] +} diff --git a/samples/hello/__init__.py b/samples/hello/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/hello/async_main.py b/samples/hello/async_main.py new file mode 100644 index 000000000..e134e28d0 --- /dev/null +++ b/samples/hello/async_main.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python + +# Copyright 2024 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Demonstrates how to connect to Cloud Bigtable and run some basic operations with the async APIs.
+
+Prerequisites:
+
+- Create a Cloud Bigtable instance.
+  https://cloud.google.com/bigtable/docs/creating-instance
+- Set your Google Application Default Credentials.
+  https://developers.google.com/identity/protocols/application-default-credentials
+"""
+
+import argparse
+import asyncio
+from ..utils import wait_for_table
+
+# [START bigtable_async_hw_imports]
+from google.cloud import bigtable
+from google.cloud.bigtable.data import row_filters
+# [END bigtable_async_hw_imports]
+
+# referenced to avoid unused-import warnings
+row_filters
+
+
+async def main(project_id, instance_id, table_id):
+    # [START bigtable_async_hw_connect]
+    client = bigtable.data.BigtableDataClientAsync(project=project_id)
+    table = client.get_table(instance_id, table_id)
+    # [END bigtable_async_hw_connect]
+
+    # [START bigtable_async_hw_create_table]
+    from google.cloud.bigtable import column_family
+
+    # The async client only supports the data API. Table creation is an
+    # admin operation, so use the admin client to create the table.
+    print("Creating the {} table.".format(table_id))
+    admin_client = bigtable.Client(project=project_id, admin=True)
+    admin_instance = admin_client.instance(instance_id)
+    admin_table = admin_instance.table(table_id)
+
+    print("Creating column family cf1 with Max Version GC rule...")
+    # Create a column family with GC policy: most recent N versions.
+    # Define the GC policy to retain only the most recent 2 versions.
+    max_versions_rule = column_family.MaxVersionsGCRule(2)
+    column_family_id = b"cf1"
+    column_families = {column_family_id: max_versions_rule}
+    if not admin_table.exists():
+        admin_table.create(column_families=column_families)
+    else:
+        print("Table {} already exists.".format(table_id))
+    # [END bigtable_async_hw_create_table]
+
+    try:
+        # Let table creation complete.
+        wait_for_table(admin_table)
+        # [START bigtable_async_hw_write_rows]
+        print("Writing some greetings to the table.")
+        greetings = [b"Hello World!", b"Hello Cloud Bigtable!", b"Hello Python!"]
+        mutations = []
+        column = b"greeting"
+        for i, value in enumerate(greetings):
+            # Note: This example uses sequential numeric IDs for simplicity,
+            # but this can result in poor performance in a production
+            # application. Since rows are stored in sorted order by key,
+            # sequential keys can result in poor distribution of operations
+            # across nodes.
+            #
+            # We recommend that you use bytestrings directly for row keys
+            # where possible, rather than encoding strings.
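+            #
+            # As a purely hypothetical sketch (not part of this sample),
+            # a key that leads with a high-cardinality field spreads
+            # writes across nodes better than a sequential prefix:
+            #
+            #     row_key = user_id + b"#greeting#" + str(i).encode()
+            #
+            # where user_id is an assumed bytestring identifier.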
+            #
+            # For more information about how to design a Bigtable schema for
+            # the best performance, see the documentation:
+            #
+            # https://cloud.google.com/bigtable/docs/schema-design
+            row_key = f"greeting{i}".encode()
+            row_mutation = bigtable.data.RowMutationEntry(
+                row_key, bigtable.data.SetCell(column_family_id, column, value)
+            )
+            mutations.append(row_mutation)
+        await table.bulk_mutate_rows(mutations)
+        # [END bigtable_async_hw_write_rows]
+
+        # [START bigtable_async_hw_create_filter]
+        # Create a filter to only retrieve the most recent version of the cell
+        # for each column across the entire row.
+        row_filter = bigtable.data.row_filters.CellsColumnLimitFilter(1)
+        # [END bigtable_async_hw_create_filter]
+
+        # [START bigtable_async_hw_get_with_filter]
+        # [START bigtable_async_hw_get_by_key]
+        print("Getting a single greeting by row key.")
+        key = "greeting0".encode()
+
+        row = await table.read_row(key, row_filter=row_filter)
+        cell = row.cells[0]
+        print(cell.value.decode("utf-8"))
+        # [END bigtable_async_hw_get_by_key]
+        # [END bigtable_async_hw_get_with_filter]
+
+        # [START bigtable_async_hw_scan_with_filter]
+        # [START bigtable_async_hw_scan_all]
+        print("Scanning for all greetings:")
+        query = bigtable.data.ReadRowsQuery(row_filter=row_filter)
+        async for row in await table.read_rows_stream(query):
+            cell = row.cells[0]
+            print(cell.value.decode("utf-8"))
+        # [END bigtable_async_hw_scan_all]
+        # [END bigtable_async_hw_scan_with_filter]
+    finally:
+        # [START bigtable_async_hw_delete_table]
+        # The async client only supports the data API. Table deletion is an
+        # admin operation, so use the admin client to delete the table.
+        print("Deleting the {} table.".format(table_id))
+        admin_table.delete()
+        # [END bigtable_async_hw_delete_table]
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    parser.add_argument("project_id", help="Your Cloud Platform project ID.")
+    parser.add_argument(
+        "instance_id", help="ID of the Cloud Bigtable instance to connect to."
+    )
+    parser.add_argument(
+        "--table", help="Table to create and destroy.", default="Hello-Bigtable"
+    )
+
+    args = parser.parse_args()
+    asyncio.run(main(args.project_id, args.instance_id, args.table))
diff --git a/samples/hello/async_main_test.py b/samples/hello/async_main_test.py
new file mode 100644
index 000000000..aa65a8652
--- /dev/null
+++ b/samples/hello/async_main_test.py
@@ -0,0 +1,36 @@
+# Copyright 2024 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import asyncio
+import uuid
+
+from .async_main import main
+
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID = f"hello-world-test-async-{str(uuid.uuid4())[:16]}"
+
+
+def test_async_main(capsys):
+    asyncio.run(main(PROJECT, BIGTABLE_INSTANCE, TABLE_ID))
+
+    out, _ = capsys.readouterr()
+    assert "Creating the {} table.".format(TABLE_ID) in out
+    assert "Writing some greetings to the table." in out
+    assert "Getting a single greeting by row key." in out
+    assert "Hello World!" in out
+    assert "Scanning for all greetings" in out
+    assert "Hello Cloud Bigtable!" in out
+    assert "Deleting the {} table.".format(TABLE_ID) in out
diff --git a/samples/hello/main.py b/samples/hello/main.py
index 5e47b4a38..2c0d83f98 100644
--- a/samples/hello/main.py
+++ b/samples/hello/main.py
@@ -18,16 +18,17 @@
 
 Prerequisites:
 
-- Create a Cloud Bigtable cluster.
-  https://cloud.google.com/bigtable/docs/creating-cluster
+- Create a Cloud Bigtable instance.
+  https://cloud.google.com/bigtable/docs/creating-instance
 - Set your Google Application Default Credentials.
   https://developers.google.com/identity/protocols/application-default-credentials
 """
 
 import argparse
+from ..utils import wait_for_table
 
 # [START bigtable_hw_imports]
-import datetime
+from datetime import datetime, timezone
 
 from google.cloud import bigtable
 from google.cloud.bigtable import column_family
@@ -35,6 +36,10 @@
 # [END bigtable_hw_imports]
 
+# referenced to avoid unused-import warnings
+row_filters
+column_family
+
 
 def main(project_id, instance_id, table_id):
     # [START bigtable_hw_connect]
@@ -51,8 +56,8 @@ def main(project_id, instance_id, table_id):
     print("Creating column family cf1 with Max Version GC rule...")
     # Create a column family with GC policy : most recent N versions
     # Define the GC policy to retain only the most recent 2 versions
-    max_versions_rule = column_family.MaxVersionsGCRule(2)
-    column_family_id = "cf1"
+    max_versions_rule = bigtable.column_family.MaxVersionsGCRule(2)
+    column_family_id = b"cf1"
     column_families = {column_family_id: max_versions_rule}
     if not table.exists():
         table.create(column_families=column_families)
@@ -60,63 +65,72 @@ def main(project_id, instance_id, table_id):
         print("Table {} already exists.".format(table_id))
     # [END bigtable_hw_create_table]
 
-    # [START bigtable_hw_write_rows]
-    print("Writing some greetings to the table.")
-    greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"]
-    rows = []
-    column = "greeting".encode()
-    for i, value in enumerate(greetings):
-        # Note: This example uses sequential numeric IDs for simplicity,
-        # but this can result in poor performance in a production
-        # application. Since rows are stored in sorted order by key,
-        # sequential keys can result in poor distribution of operations
-        # across nodes.
-        #
-        # For more information about how to design a Bigtable schema for
-        # the best performance, see the documentation:
-        #
-        # https://cloud.google.com/bigtable/docs/schema-design
-        row_key = "greeting{}".format(i).encode()
-        row = table.direct_row(row_key)
-        row.set_cell(
-            column_family_id, column, value, timestamp=datetime.datetime.utcnow()
-        )
-        rows.append(row)
-    table.mutate_rows(rows)
-    # [END bigtable_hw_write_rows]
-
-    # [START bigtable_hw_create_filter]
-    # Create a filter to only retrieve the most recent version of the cell
-    # for each column across entire row.
-    row_filter = row_filters.CellsColumnLimitFilter(1)
-    # [END bigtable_hw_create_filter]
-
-    # [START bigtable_hw_get_with_filter]
-    # [START bigtable_hw_get_by_key]
-    print("Getting a single greeting by row key.")
-    key = "greeting0".encode()
-
-    row = table.read_row(key, row_filter)
-    cell = row.cells[column_family_id][column][0]
-    print(cell.value.decode("utf-8"))
-    # [END bigtable_hw_get_by_key]
-    # [END bigtable_hw_get_with_filter]
-
-    # [START bigtable_hw_scan_with_filter]
-    # [START bigtable_hw_scan_all]
-    print("Scanning for all greetings:")
-    partial_rows = table.read_rows(filter_=row_filter)
-
-    for row in partial_rows:
-        cell = row.cells[column_family_id][column][0]
+    try:
+        # Let table creation complete.
+        wait_for_table(table)
+
+        # [START bigtable_hw_write_rows]
+        print("Writing some greetings to the table.")
+        greetings = [b"Hello World!", b"Hello Cloud Bigtable!", b"Hello Python!"]
+        rows = []
+        column = b"greeting"
+        for i, value in enumerate(greetings):
+            # Note: This example uses sequential numeric IDs for simplicity,
+            # but this can result in poor performance in a production
+            # application. Since rows are stored in sorted order by key,
+            # sequential keys can result in poor distribution of operations
+            # across nodes.
+            #
+            # We recommend that you use bytestrings directly for row keys
+            # where possible, rather than encoding strings.
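+            #
+            # As a hypothetical illustration (not part of this sample),
+            # prefixing a salt bucket is one way to avoid writing every
+            # row to a single hot tablet:
+            #
+            #     row_key = str(i % 10).encode() + b"#greeting" + str(i).encode()
+            #
+            # where the leading digit is an assumed salt bucket.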
- row_filter = row_filters.CellsColumnLimitFilter(1) - # [END bigtable_hw_create_filter] - - # [START bigtable_hw_get_with_filter] - # [START bigtable_hw_get_by_key] - print("Getting a single greeting by row key.") - key = "greeting0".encode() - - row = table.read_row(key, row_filter) - cell = row.cells[column_family_id][column][0] - print(cell.value.decode("utf-8")) - # [END bigtable_hw_get_by_key] - # [END bigtable_hw_get_with_filter] - - # [START bigtable_hw_scan_with_filter] - # [START bigtable_hw_scan_all] - print("Scanning for all greetings:") - partial_rows = table.read_rows(filter_=row_filter) - - for row in partial_rows: - cell = row.cells[column_family_id][column][0] + try: + # let table creation complete + wait_for_table(table) + + # [START bigtable_hw_write_rows] + print("Writing some greetings to the table.") + greetings = [b"Hello World!", b"Hello Cloud Bigtable!", b"Hello Python!"] + rows = [] + column = b"greeting" + for i, value in enumerate(greetings): + # Note: This example uses sequential numeric IDs for simplicity, + # but this can result in poor performance in a production + # application. Since rows are stored in sorted order by key, + # sequential keys can result in poor distribution of operations + # across nodes. + # + # We recommend that you use bytestrings directly for row keys + # where possible, rather than encoding strings. + # + # For more information about how to design a Bigtable schema for + # the best performance, see the documentation: + # + # https://cloud.google.com/bigtable/docs/schema-design + row_key = f"greeting{i}".encode() + row = table.direct_row(row_key) + row.set_cell( + column_family_id, column, value, timestamp=datetime.now(timezone.utc), + ) + rows.append(row) + table.mutate_rows(rows) + # [END bigtable_hw_write_rows] + + # [START bigtable_hw_create_filter] + # Create a filter to only retrieve the most recent version of the cell + # for each column across entire row. + row_filter = bigtable.row_filters.CellsColumnLimitFilter(1) + # [END bigtable_hw_create_filter] + + # [START bigtable_hw_get_with_filter] + # [START bigtable_hw_get_by_key] + print("Getting a single greeting by row key.") + key = b"greeting0" + + row = table.read_row(key, row_filter) + cell = row.cells[column_family_id.decode("utf-8")][column][0] print(cell.value.decode("utf-8")) - # [END bigtable_hw_scan_all] - # [END bigtable_hw_scan_with_filter] - - # [START bigtable_hw_delete_table] - print("Deleting the {} table.".format(table_id)) - table.delete() - # [END bigtable_hw_delete_table] + # [END bigtable_hw_get_by_key] + # [END bigtable_hw_get_with_filter] + + # [START bigtable_hw_scan_with_filter] + # [START bigtable_hw_scan_all] + print("Scanning for all greetings:") + partial_rows = table.read_rows(filter_=row_filter) + + for row in partial_rows: + column_family_id_str = column_family_id.decode("utf-8") + cell = row.cells[column_family_id_str][column][0] + print(cell.value.decode("utf-8")) + # [END bigtable_hw_scan_all] + # [END bigtable_hw_scan_with_filter] + + finally: + # [START bigtable_hw_delete_table] + print("Deleting the {} table.".format(table_id)) + table.delete() + # [END bigtable_hw_delete_table] if __name__ == "__main__": diff --git a/samples/hello/main_test.py b/samples/hello/main_test.py index 641b34d11..28814d909 100644 --- a/samples/hello/main_test.py +++ b/samples/hello/main_test.py @@ -13,26 +13,23 @@ # limitations under the License. 
import os -import random +import uuid -from main import main +from .main import main PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_NAME_FORMAT = "hello-world-test-{}" -TABLE_NAME_RANGE = 10000 +TABLE_ID = f"hello-world-test-{str(uuid.uuid4())[:16]}" def test_main(capsys): - table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE)) - - main(PROJECT, BIGTABLE_INSTANCE, table_name) + main(PROJECT, BIGTABLE_INSTANCE, TABLE_ID) out, _ = capsys.readouterr() - assert "Creating the {} table.".format(table_name) in out + assert "Creating the {} table.".format(TABLE_ID) in out assert "Writing some greetings to the table." in out assert "Getting a single greeting by row key." in out assert "Hello World!" in out assert "Scanning for all greetings" in out assert "Hello Cloud Bigtable!" in out - assert "Deleting the {} table.".format(table_name) in out + assert "Deleting the {} table.".format(TABLE_ID) in out diff --git a/samples/hello/noxfile.py b/samples/hello/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/hello/noxfile.py +++ b/samples/hello/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/hello/requirements-test.txt b/samples/hello/requirements-test.txt index c4d04a08d..e079f8a60 100644 --- a/samples/hello/requirements-test.txt +++ b/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest diff --git a/samples/hello/requirements.txt b/samples/hello/requirements.txt index 199541ffe..5113ca7f1 100644 --- a/samples/hello/requirements.txt +++ b/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 -google-cloud-core==2.3.2 +google-cloud-bigtable==2.35.0 +google-cloud-core==2.5.0 diff --git a/samples/hello_happybase/__init__.py b/samples/hello_happybase/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/hello_happybase/main.py b/samples/hello_happybase/main.py index 7999fd006..50820febd 100644 --- a/samples/hello_happybase/main.py +++ b/samples/hello_happybase/main.py @@ -25,6 +25,7 @@ """ import argparse +from ..utils import wait_for_table # [START bigtable_hw_imports_happybase] from google.cloud import bigtable @@ -51,6 +52,8 @@ def main(project_id, instance_id, table_name): ) # [END bigtable_hw_create_table_happybase] + wait_for_table(instance.table(table_name)) + # [START bigtable_hw_write_rows_happybase] print("Writing some greetings to the table.") table = connection.table(table_name) @@ -90,12 +93,11 @@ def main(project_id, instance_id, table_name): print("\t{}: {}".format(key, row[column_name.encode("utf-8")])) # [END bigtable_hw_scan_all_happybase] + finally: # [START bigtable_hw_delete_table_happybase] print("Deleting the {} table.".format(table_name)) connection.delete_table(table_name) # [END bigtable_hw_delete_table_happybase] - - finally: connection.close() diff --git a/samples/hello_happybase/main_test.py b/samples/hello_happybase/main_test.py index 6a63750da..252f4ccaf 100644 --- a/samples/hello_happybase/main_test.py +++ b/samples/hello_happybase/main_test.py @@ -13,25 +13,32 @@ # limitations under the License. 
import os -import random +import uuid -from main import main +from .main import main +from google.cloud import bigtable PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_NAME_FORMAT = "hello-world-hb-test-{}" -TABLE_NAME_RANGE = 10000 +TABLE_ID = f"hello-world-hb-test-{str(uuid.uuid4())[:16]}" def test_main(capsys): - table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE)) - main(PROJECT, BIGTABLE_INSTANCE, table_name) + try: + main(PROJECT, BIGTABLE_INSTANCE, TABLE_ID) - out, _ = capsys.readouterr() - assert "Creating the {} table.".format(table_name) in out - assert "Writing some greetings to the table." in out - assert "Getting a single greeting by row key." in out - assert "Hello World!" in out - assert "Scanning for all greetings" in out - assert "Hello Cloud Bigtable!" in out - assert "Deleting the {} table.".format(table_name) in out + out, _ = capsys.readouterr() + assert "Creating the {} table.".format(TABLE_ID) in out + assert "Writing some greetings to the table." in out + assert "Getting a single greeting by row key." in out + assert "Hello World!" in out + assert "Scanning for all greetings" in out + assert "Hello Cloud Bigtable!" in out + assert "Deleting the {} table.".format(TABLE_ID) in out + finally: + # delete table + client = bigtable.Client(PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(TABLE_ID) + if table.exists(): + table.delete() diff --git a/samples/hello_happybase/noxfile.py b/samples/hello_happybase/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/hello_happybase/noxfile.py +++ b/samples/hello_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/hello_happybase/requirements-test.txt b/samples/hello_happybase/requirements-test.txt index c4d04a08d..e079f8a60 100644 --- a/samples/hello_happybase/requirements-test.txt +++ b/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest diff --git a/samples/hello_happybase/requirements.txt b/samples/hello_happybase/requirements.txt index d3368cd0f..dc1a04f30 100644 --- a/samples/hello_happybase/requirements.txt +++ b/samples/hello_happybase/requirements.txt @@ -1,2 +1,2 @@ google-cloud-happybase==0.33.0 -six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 +six==1.17.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 diff --git a/samples/instanceadmin/noxfile.py b/samples/instanceadmin/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/instanceadmin/noxfile.py +++ b/samples/instanceadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/instanceadmin/requirements-test.txt b/samples/instanceadmin/requirements-test.txt index c4d04a08d..e079f8a60 100644 --- a/samples/instanceadmin/requirements-test.txt +++ b/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest diff --git a/samples/instanceadmin/requirements.txt b/samples/instanceadmin/requirements.txt index 04e476254..67a1ea5b8 100644 --- a/samples/instanceadmin/requirements.txt +++ b/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.35.0 backoff==2.2.1 diff --git a/samples/metricscaler/noxfile.py b/samples/metricscaler/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/metricscaler/noxfile.py +++ b/samples/metricscaler/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/metricscaler/requirements-test.txt b/samples/metricscaler/requirements-test.txt index 761227068..d11108b81 100644 --- a/samples/metricscaler/requirements-test.txt +++ b/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.3.1 -mock==5.0.2 +pytest +mock==5.2.0 google-cloud-testutils diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt index 02e08b4c8..257fd1ef6 100644 --- a/samples/metricscaler/requirements.txt +++ b/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 -google-cloud-monitoring==2.14.2 +google-cloud-bigtable==2.35.0 +google-cloud-monitoring==2.29.0 diff --git a/samples/quickstart/__init__.py b/samples/quickstart/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/quickstart/main_async.py b/samples/quickstart/main_async.py new file mode 100644 index 000000000..c38985592 --- /dev/null +++ b/samples/quickstart/main_async.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +# Copyright 2024 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START bigtable_quickstart_asyncio] +import argparse +import asyncio + +from google.cloud.bigtable.data import BigtableDataClientAsync + + +async def main(project_id="project-id", instance_id="instance-id", table_id="my-table"): + # Create a Cloud Bigtable client. + client = BigtableDataClientAsync(project=project_id) + + # Open an existing table. 
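+    # (get_table builds a table reference without an RPC; it does not verify
+    # that the table exists, so a missing table surfaces on the first read.)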
+ table = client.get_table(instance_id, table_id) + + row_key = "r1" + row = await table.read_row(row_key) + + column_family_id = "cf1" + column_id = b"c1" + value = row.get_cells(column_family_id, column_id)[0].value.decode("utf-8") + + await table.close() + await client.close() + + print("Row key: {}\nData: {}".format(row_key, value)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("project_id", help="Your Cloud Platform project ID.") + parser.add_argument( + "instance_id", help="ID of the Cloud Bigtable instance to connect to." + ) + parser.add_argument( + "--table", help="Existing table used in the quickstart.", default="my-table" + ) + + args = parser.parse_args() + asyncio.get_event_loop().run_until_complete( + main(args.project_id, args.instance_id, args.table) + ) + +# [END bigtable_quickstart_asyncio] diff --git a/samples/quickstart/main_async_test.py b/samples/quickstart/main_async_test.py new file mode 100644 index 000000000..0749cbd31 --- /dev/null +++ b/samples/quickstart/main_async_test.py @@ -0,0 +1,49 @@ +# Copyright 2024 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid +from typing import AsyncGenerator + +from google.cloud.bigtable.data import BigtableDataClientAsync, SetCell +import pytest +import pytest_asyncio + +from .main_async import main +from ..utils import create_table_cm + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID = f"quickstart-async-test-{str(uuid.uuid4())[:16]}" + + +@pytest_asyncio.fixture +async def table_id() -> AsyncGenerator[str, None]: + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"cf1": None}): + await _populate_table(TABLE_ID) + yield TABLE_ID + + +async def _populate_table(table_id: str): + async with BigtableDataClientAsync(project=PROJECT) as client: + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + await table.mutate_row("r1", SetCell("cf1", "c1", "test-value")) + + +@pytest.mark.asyncio +async def test_main(capsys, table_id): + await main(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + assert "Row key: r1\nData: test-value\n" in out diff --git a/samples/quickstart/main_test.py b/samples/quickstart/main_test.py index 46d578b6b..f58161f23 100644 --- a/samples/quickstart/main_test.py +++ b/samples/quickstart/main_test.py @@ -14,35 +14,28 @@ import os import uuid - -from google.cloud import bigtable import pytest -from main import main +from .main import main + +from ..utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_FORMAT = "quickstart-test-{}" +TABLE_ID = f"quickstart-test-{str(uuid.uuid4())[:16]}" @pytest.fixture() def table(): - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - client = bigtable.Client(project=PROJECT, admin=True) - instance = 
client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) column_family_id = "cf1" column_families = {column_family_id: None} - table.create(column_families=column_families) - - row = table.direct_row("r1") - row.set_cell(column_family_id, "c1", "test-value") - row.commit() - - yield table_id + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, column_families) as table: + row = table.direct_row("r1") + row.set_cell(column_family_id, "c1", "test-value") + row.commit() - table.delete() + yield TABLE_ID def test_main(capsys, table): diff --git a/samples/quickstart/noxfile.py b/samples/quickstart/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/quickstart/noxfile.py +++ b/samples/quickstart/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/quickstart/requirements-test.txt b/samples/quickstart/requirements-test.txt index c4d04a08d..ee4ba0186 100644 --- a/samples/quickstart/requirements-test.txt +++ b/samples/quickstart/requirements-test.txt @@ -1 +1,2 @@ -pytest==7.3.1 +pytest +pytest-asyncio diff --git a/samples/quickstart/requirements.txt b/samples/quickstart/requirements.txt index 909f8c365..730d25dec 100644 --- a/samples/quickstart/requirements.txt +++ b/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.35.0 diff --git a/samples/quickstart_happybase/__init__.py b/samples/quickstart_happybase/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/quickstart_happybase/main_test.py b/samples/quickstart_happybase/main_test.py index dc62ebede..343ec800a 100644 --- a/samples/quickstart_happybase/main_test.py +++ b/samples/quickstart_happybase/main_test.py @@ -14,35 +14,26 @@ import os import uuid - -from google.cloud import bigtable import pytest -from main import main - +from .main import main +from ..utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_FORMAT = "quickstart-hb-test-{}" +TABLE_ID = f"quickstart-hb-test-{str(uuid.uuid4())[:16]}" @pytest.fixture() def table(): - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) column_family_id = "cf1" column_families = {column_family_id: None} - table.create(column_families=column_families) - - row = table.direct_row("r1") - row.set_cell(column_family_id, "c1", "test-value") - row.commit() - - yield table_id + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, column_families) as table: + row = table.direct_row("r1") + row.set_cell(column_family_id, "c1", "test-value") + row.commit() - table.delete() + yield TABLE_ID def test_main(capsys, table): diff --git a/samples/quickstart_happybase/noxfile.py b/samples/quickstart_happybase/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/quickstart_happybase/noxfile.py +++ b/samples/quickstart_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. 
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/quickstart_happybase/requirements-test.txt b/samples/quickstart_happybase/requirements-test.txt index c4d04a08d..55b033e90 100644 --- a/samples/quickstart_happybase/requirements-test.txt +++ b/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest \ No newline at end of file diff --git a/samples/quickstart_happybase/requirements.txt b/samples/quickstart_happybase/requirements.txt index d3368cd0f..dc1a04f30 100644 --- a/samples/quickstart_happybase/requirements.txt +++ b/samples/quickstart_happybase/requirements.txt @@ -1,2 +1,2 @@ google-cloud-happybase==0.33.0 -six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 +six==1.17.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 diff --git a/samples/snippets/__init__.py b/samples/snippets/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/snippets/data_client/__init__.py b/samples/snippets/data_client/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/snippets/data_client/data_client_snippets_async.py b/samples/snippets/data_client/data_client_snippets_async.py new file mode 100644 index 000000000..332dbd56f --- /dev/null +++ b/samples/snippets/data_client/data_client_snippets_async.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python + +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
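For orientation: each snippet added below follows the same skeleton, opening the async data client and the table as context managers, performing one operation, and letting the context managers close the underlying gRPC channel. A minimal sketch of that shared pattern (the project/instance/table IDs are placeholders, not values from this diff):

```python
import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync


async def snippet_skeleton(project_id, instance_id, table_id):
    # Entering the client/table context managers sets up the gRPC channel;
    # exiting them closes it, so no explicit close() calls are needed.
    async with BigtableDataClientAsync(project=project_id) as client:
        async with client.get_table(instance_id, table_id) as table:
            row = await table.read_row(b"phone#4c410523#20190501")
            print(row)


asyncio.run(snippet_skeleton("my-project", "my-instance", "my-table"))
```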
+ + +async def write_simple(table): + # [START bigtable_async_write_simple] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import SetCell + + async def write_simple(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + family_id = "stats_summary" + row_key = b"phone#4c410523#20190501" + + cell_mutation = SetCell(family_id, "connected_cell", 1) + wifi_mutation = SetCell(family_id, "connected_wifi", 1) + os_mutation = SetCell(family_id, "os_build", "PQ2A.190405.003") + + await table.mutate_row(row_key, cell_mutation) + await table.mutate_row(row_key, wifi_mutation) + await table.mutate_row(row_key, os_mutation) + + # [END bigtable_async_write_simple] + await write_simple(table.client.project, table.instance_id, table.table_id) + + +async def write_batch(table): + # [START bigtable_async_writes_batch] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + + async def write_batch(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + family_id = "stats_summary" + try: + async with table.mutations_batcher() as batcher: + mutation_list = [ + SetCell(family_id, "connected_cell", 1), + SetCell(family_id, "connected_wifi", 1), + SetCell(family_id, "os_build", "12155.0.0-rc1"), + ] + # awaiting the batcher.append method adds the RowMutationEntry + # to the batcher's queue to be written in the next flush. + await batcher.append( + RowMutationEntry("tablet#a0b81f74#20190501", mutation_list) + ) + await batcher.append( + RowMutationEntry("tablet#a0b81f74#20190502", mutation_list) + ) + except MutationsExceptionGroup as e: + # MutationsExceptionGroup contains a FailedMutationEntryError for + # each mutation that failed. + for sub_exception in e.exceptions: + failed_entry: RowMutationEntry = sub_exception.entry + cause: Exception = sub_exception.__cause__ + print( + f"Failed mutation: {failed_entry.row_key} with error: {cause!r}" + ) + + # [END bigtable_async_writes_batch] + await write_batch(table.client.project, table.instance_id, table.table_id) + + +async def write_increment(table): + # [START bigtable_async_write_increment] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + async def write_increment(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + family_id = "stats_summary" + row_key = "phone#4c410523#20190501" + + # Decrement the connected_wifi value by 1. 
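+            # read_modify_write_row applies the rule as an atomic
+            # read-modify-write on the server, so a negative increment_amount
+            # decrements the stored value safely.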
+ increment_rule = IncrementRule( + family_id, "connected_wifi", increment_amount=-1 + ) + result_row = await table.read_modify_write_row(row_key, increment_rule) + + # check result + cell = result_row[0] + print(f"{cell.row_key} value: {int(cell)}") + + # [END bigtable_async_write_increment] + await write_increment(table.client.project, table.instance_id, table.table_id) + + +async def write_conditional(table): + # [START bigtable_async_writes_conditional] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import SetCell + + async def write_conditional(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + family_id = "stats_summary" + row_key = "phone#4c410523#20190501" + + row_filter = row_filters.RowFilterChain( + filters=[ + row_filters.FamilyNameRegexFilter(family_id), + row_filters.ColumnQualifierRegexFilter("os_build"), + row_filters.ValueRegexFilter("PQ2A\\..*"), + ] + ) + + if_true = SetCell(family_id, "os_name", "android") + result = await table.check_and_mutate_row( + row_key, + row_filter, + true_case_mutations=if_true, + false_case_mutations=None, + ) + if result is True: + print("The row os_name was set to android") + + # [END bigtable_async_writes_conditional] + await write_conditional(table.client.project, table.instance_id, table.table_id) + + +async def write_aggregate(table): + # [START bigtable_async_write_aggregate] + import time + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data.mutations import AddToCell, RowMutationEntry + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + + async def write_aggregate(project_id, instance_id, table_id): + """Increments a value in a Bigtable table using AddToCell mutation.""" + async with BigtableDataClientAsync(project=project_id) as client: + table = client.get_table(instance_id, table_id) + row_key = "unique_device_ids_1" + try: + async with table.mutations_batcher() as batcher: + # The AddToCell mutation increments the value of a cell. + # The `counters` family must be set up to be an aggregate + # family with an int64 input type. + reading = AddToCell( + family="counters", + qualifier="odometer", + value=32304, + # Convert nanoseconds to microseconds + timestamp_micros=time.time_ns() // 1000, + ) + await batcher.append( + RowMutationEntry(row_key.encode("utf-8"), [reading]) + ) + except MutationsExceptionGroup as e: + # MutationsExceptionGroup contains a FailedMutationEntryError for + # each mutation that failed. 
+ for sub_exception in e.exceptions: + failed_entry: RowMutationEntry = sub_exception.entry + cause: Exception = sub_exception.__cause__ + print( + f"Failed mutation for row {failed_entry.row_key!r} with error: {cause!r}" + ) + + # [END bigtable_async_write_aggregate] + await write_aggregate(table.client.project, table.instance_id, table.table_id) + + +async def read_row(table): + # [START bigtable_async_reads_row] + from google.cloud.bigtable.data import BigtableDataClientAsync + + async def read_row(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + row_key = "phone#4c410523#20190501" + row = await table.read_row(row_key) + print(row) + + # [END bigtable_async_reads_row] + await read_row(table.client.project, table.instance_id, table.table_id) + + +async def read_row_partial(table): + # [START bigtable_async_reads_row_partial] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import row_filters + + async def read_row_partial(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + row_key = "phone#4c410523#20190501" + col_filter = row_filters.ColumnQualifierRegexFilter(b"os_build") + + row = await table.read_row(row_key, row_filter=col_filter) + print(row) + + # [END bigtable_async_reads_row_partial] + await read_row_partial(table.client.project, table.instance_id, table.table_id) + + +async def read_rows_multiple(table): + # [START bigtable_async_reads_rows] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import ReadRowsQuery + + async def read_rows(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + + query = ReadRowsQuery( + row_keys=[b"phone#4c410523#20190501", b"phone#4c410523#20190502"] + ) + async for row in await table.read_rows_stream(query): + print(row) + + # [END bigtable_async_reads_rows] + await read_rows(table.client.project, table.instance_id, table.table_id) + + +async def read_row_range(table): + # [START bigtable_async_reads_row_range] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + async def read_row_range(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + + row_range = RowRange( + start_key=b"phone#4c410523#20190501", + end_key=b"phone#4c410523#201906201", + ) + query = ReadRowsQuery(row_ranges=[row_range]) + + async for row in await table.read_rows_stream(query): + print(row) + + # [END bigtable_async_reads_row_range] + await read_row_range(table.client.project, table.instance_id, table.table_id) + + +async def read_with_prefix(table): + # [START bigtable_async_reads_prefix] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + async def read_prefix(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + + prefix = "phone#" + end_key = prefix[:-1] + 
chr(ord(prefix[-1]) + 1) + prefix_range = RowRange(start_key=prefix, end_key=end_key) + query = ReadRowsQuery(row_ranges=[prefix_range]) + + async for row in await table.read_rows_stream(query): + print(row) + + # [END bigtable_async_reads_prefix] + await read_prefix(table.client.project, table.instance_id, table.table_id) + + +async def read_with_filter(table): + # [START bigtable_async_reads_filter] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + async def read_with_filter(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + + row_filter = row_filters.ValueRegexFilter(b"PQ2A.*$") + query = ReadRowsQuery(row_filter=row_filter) + + async for row in await table.read_rows_stream(query): + print(row) + + # [END bigtable_async_reads_filter] + await read_with_filter(table.client.project, table.instance_id, table.table_id) + + +async def execute_query(table): + # [START bigtable_async_execute_query] + from google.cloud.bigtable.data import BigtableDataClientAsync + + async def execute_query(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + query = ( + "SELECT _key, stats_summary['os_build'], " + "stats_summary['connected_cell'], " + "stats_summary['connected_wifi'] " + f"from `{table_id}` WHERE _key=@row_key" + ) + result = await client.execute_query( + query, + instance_id, + parameters={"row_key": b"phone#4c410523#20190501"}, + ) + results = [r async for r in result] + print(results) + + # [END bigtable_async_execute_query] + await execute_query(table.client.project, table.instance_id, table.table_id) diff --git a/samples/snippets/data_client/data_client_snippets_async_test.py b/samples/snippets/data_client/data_client_snippets_async_test.py new file mode 100644 index 000000000..2761bd487 --- /dev/null +++ b/samples/snippets/data_client/data_client_snippets_async_test.py @@ -0,0 +1,117 @@ +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest +import pytest_asyncio +import os +import uuid + +from . 
import data_client_snippets_async as data_snippets +from ...utils import create_table_cm + + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID = f"data-client-{str(uuid.uuid4())[:16]}" + + +@pytest.fixture(scope="session") +def column_family_config(): + from google.cloud.bigtable_admin_v2 import types + + int_aggregate_type = types.Type.Aggregate( + input_type=types.Type(int64_type={"encoding": {"big_endian_bytes": {}}}), + sum={}, + ) + + return { + "family": types.ColumnFamily(), + "stats_summary": types.ColumnFamily(), + "counters": types.ColumnFamily( + value_type=types.Type(aggregate_type=int_aggregate_type) + ), + } + + +@pytest.fixture(scope="session") +def table_id(column_family_config): + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, column_family_config): + yield TABLE_ID + + +@pytest_asyncio.fixture +async def table(table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + + async with BigtableDataClientAsync(project=PROJECT) as client: + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + yield table + + +@pytest.mark.asyncio +async def test_write_simple(table): + await data_snippets.write_simple(table) + + +@pytest.mark.asyncio +async def test_write_batch(table): + await data_snippets.write_batch(table) + + +@pytest.mark.asyncio +async def test_write_increment(table): + await data_snippets.write_increment(table) + + +@pytest.mark.asyncio +async def test_write_conditional(table): + await data_snippets.write_conditional(table) + + +@pytest.mark.asyncio +async def test_write_aggregate(table): + await data_snippets.write_aggregate(table) + + +@pytest.mark.asyncio +async def test_read_row(table): + await data_snippets.read_row(table) + + +@pytest.mark.asyncio +async def test_read_row_partial(table): + await data_snippets.read_row_partial(table) + + +@pytest.mark.asyncio +async def test_read_rows_multiple(table): + await data_snippets.read_rows_multiple(table) + + +@pytest.mark.asyncio +async def test_read_row_range(table): + await data_snippets.read_row_range(table) + + +@pytest.mark.asyncio +async def test_read_with_prefix(table): + await data_snippets.read_with_prefix(table) + + +@pytest.mark.asyncio +async def test_read_with_filter(table): + await data_snippets.read_with_filter(table) + + +@pytest.mark.asyncio +async def test_execute_query(table): + await data_snippets.execute_query(table) diff --git a/samples/snippets/data_client/noxfile.py b/samples/snippets/data_client/noxfile.py new file mode 100644 index 000000000..a169b5b5b --- /dev/null +++ b/samples/snippets/data_client/noxfile.py @@ -0,0 +1,292 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
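The data client tests above create and tear down their table through a `create_table_cm` helper imported from `samples/utils.py`, which this diff does not show. A plausible sketch of such a context manager for the simple GC-rule case follows; the real helper must also accept the admin-API `ColumnFamily` configs used by the aggregate fixture, so treat this as an illustration under those assumptions:

```python
from contextlib import contextmanager

from google.cloud import bigtable


@contextmanager
def create_table_cm(project_id, instance_id, table_id, column_families, verbose=True):
    # Hypothetical reconstruction: create the table on entry, always delete
    # it on exit, and yield the admin table handle for seeding test data.
    client = bigtable.Client(project=project_id, admin=True)
    table = client.instance(instance_id).table(table_id)
    if verbose:
        print(f"creating table {table_id}")
    table.create(column_families=column_families)
    try:
        yield table
    finally:
        if verbose:
            print(f"deleting table {table_id}")
        table.delete()
```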
+ +from __future__ import print_function + +import glob +import os +from pathlib import Path +import sys +from typing import Callable, Dict, Optional + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" + +# Copy `noxfile_config.py` to your directory and modify it instead. + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars() -> Dict[str, str]: + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to test samples. +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + +# +# Style Checks +# + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8") + else: + session.install("flake8", "flake8-annotations") + + args = FLAKE8_COMMON_ARGS + [ + ".", + ] + session.run("flake8", *args) + + +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + # check for presence of tests + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) + + if len(test_list) == 0: + print("No tests found, skipping directory.") + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session: nox.sessions.Session) -> None: + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root() -> Optional[str]: + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session: nox.sessions.Session, path: str) -> None: + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/samples/snippets/data_client/requirements-test.txt b/samples/snippets/data_client/requirements-test.txt new file mode 100644 index 000000000..ee4ba0186 --- /dev/null +++ b/samples/snippets/data_client/requirements-test.txt @@ -0,0 +1,2 @@ +pytest +pytest-asyncio diff --git a/samples/snippets/data_client/requirements.txt b/samples/snippets/data_client/requirements.txt new file mode 100644 index 000000000..730d25dec --- /dev/null +++ b/samples/snippets/data_client/requirements.txt @@ -0,0 +1 @@ +google-cloud-bigtable==2.35.0 diff --git a/samples/snippets/deletes/__init__.py b/samples/snippets/deletes/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/snippets/deletes/deletes_async_test.py b/samples/snippets/deletes/deletes_async_test.py new file mode 100644 index 000000000..4fb4898e5 --- /dev/null +++ b/samples/snippets/deletes/deletes_async_test.py @@ -0,0 +1,274 @@ +# Copyright 2024, Google LLC + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import os +import uuid +from typing import AsyncGenerator + +from google.cloud._helpers import _microseconds_from_datetime +import pytest +import pytest_asyncio + +from . 
import deletes_snippets_async +from ...utils import create_table_cm + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID = f"mobile-time-series-deletes-async-{str(uuid.uuid4())[:16]}" + + +@pytest.fixture(scope="module") +def event_loop(): + import asyncio + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() + + +@pytest_asyncio.fixture(scope="module", autouse=True) +async def table_id() -> AsyncGenerator[str, None]: + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None, "cell_plan": None}, verbose=False): + await _populate_table(TABLE_ID) + yield TABLE_ID + + +async def _populate_table(table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + RowMutationEntry, + SetCell, + ) + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) + + async with BigtableDataClientAsync(project=PROJECT) as client: + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + async with table.mutations_batcher() as batcher: + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.003", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "true", + _microseconds_from_datetime(timestamp_minus_hr), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "false", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.004", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190505", + [ + SetCell( + "stats_summary", + "connected_cell", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190401.002", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190502", + 
[ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + + +def assert_output_match(capsys, expected): + out, _ = capsys.readouterr() + assert out == expected + + +@pytest.mark.asyncio +async def test_delete_from_column(capsys, table_id): + await deletes_snippets_async.delete_from_column( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + assert_output_match(capsys, "") + + +@pytest.mark.asyncio +async def test_delete_from_column_family(capsys, table_id): + await deletes_snippets_async.delete_from_column_family( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + assert_output_match(capsys, "") + + +@pytest.mark.asyncio +async def test_delete_from_row(capsys, table_id): + await deletes_snippets_async.delete_from_row(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_output_match(capsys, "") + + +@pytest.mark.asyncio +async def test_streaming_and_batching(capsys, table_id): + await deletes_snippets_async.streaming_and_batching( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + assert_output_match(capsys, "") + + +@pytest.mark.asyncio +async def test_check_and_mutate(capsys, table_id): + await deletes_snippets_async.check_and_mutate(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_output_match(capsys, "") diff --git a/samples/snippets/deletes/deletes_snippets.py b/samples/snippets/deletes/deletes_snippets.py index 8e78083bf..6cdbf33a6 100644 --- a/samples/snippets/deletes/deletes_snippets.py +++ b/samples/snippets/deletes/deletes_snippets.py @@ -14,14 +14,11 @@ # limitations under the License. -from google.cloud import bigtable - -# Write your code here. 
- - # [START bigtable_delete_from_column] def delete_from_column(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") @@ -33,13 +30,13 @@ def delete_from_column(project_id, instance_id, table_id): # [START bigtable_delete_from_column_family] def delete_from_column_family(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") - row.delete_cells( - column_family_id="cell_plan", columns=row.ALL_COLUMNS - ) + row.delete_cells(column_family_id="cell_plan", columns=row.ALL_COLUMNS) row.commit() @@ -48,7 +45,9 @@ def delete_from_column_family(project_id, instance_id, table_id): # [START bigtable_delete_from_row] def delete_from_row(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") @@ -60,7 +59,9 @@ def delete_from_row(project_id, instance_id, table_id): # [START bigtable_streaming_and_batching] def streaming_and_batching(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) batcher = table.mutations_batcher(flush_count=2) @@ -76,7 +77,9 @@ def streaming_and_batching(project_id, instance_id, table_id): # [START bigtable_check_and_mutate] def check_and_mutate(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") @@ -90,7 +93,9 @@ def check_and_mutate(project_id, instance_id, table_id): # [START bigtable_drop_row_range] def drop_row_range(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row_key_prefix = "phone#4c410523" @@ -101,7 +106,9 @@ def drop_row_range(project_id, instance_id, table_id): # [START bigtable_delete_column_family] def delete_column_family(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) column_family_id = "stats_summary" @@ -113,7 +120,9 @@ def delete_column_family(project_id, instance_id, table_id): # [START bigtable_delete_table] def delete_table(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = 
client.instance(instance_id) table = instance.table(table_id) table.delete() diff --git a/samples/snippets/deletes/deletes_snippets_async.py b/samples/snippets/deletes/deletes_snippets_async.py new file mode 100644 index 000000000..2241fab4a --- /dev/null +++ b/samples/snippets/deletes/deletes_snippets_async.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# [START bigtable_delete_from_column_asyncio] +async def delete_from_column(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteRangeFromColumn + + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + await table.mutate_row( + "phone#4c410523#20190501", + DeleteRangeFromColumn(family="cell_plan", qualifier=b"data_plan_01gb"), + ) + + await table.close() + await client.close() + + +# [END bigtable_delete_from_column_asyncio] + +# [START bigtable_delete_from_column_family_asyncio] +async def delete_from_column_family(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteAllFromFamily + + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + await table.mutate_row("phone#4c410523#20190501", DeleteAllFromFamily("cell_plan")) + + await table.close() + await client.close() + + +# [END bigtable_delete_from_column_family_asyncio] + + +# [START bigtable_delete_from_row_asyncio] +async def delete_from_row(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteAllFromRow + + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + await table.mutate_row("phone#4c410523#20190501", DeleteAllFromRow()) + + await table.close() + await client.close() + + +# [END bigtable_delete_from_row_asyncio] + +# [START bigtable_streaming_and_batching_asyncio] +async def streaming_and_batching(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteRangeFromColumn + from google.cloud.bigtable.data import RowMutationEntry + from google.cloud.bigtable.data import ReadRowsQuery + + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + async with table.mutations_batcher() as batcher: + async for row in await table.read_rows_stream(ReadRowsQuery(limit=10)): + await batcher.append( + RowMutationEntry( + row.row_key, + DeleteRangeFromColumn( + family="cell_plan", qualifier=b"data_plan_01gb" + ), + ) + ) + + await table.close() + await client.close() + + +# [END bigtable_streaming_and_batching_asyncio] + +# [START bigtable_check_and_mutate_asyncio] +async def check_and_mutate(project_id, instance_id, table_id): + from 
google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteRangeFromColumn + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + await table.check_and_mutate_row( + "phone#4c410523#20190501", + predicate=LiteralValueFilter("PQ2A.190405.003"), + true_case_mutations=DeleteRangeFromColumn( + family="cell_plan", qualifier=b"data_plan_01gb" + ), + ) + + await table.close() + await client.close() + + +# [END bigtable_check_and_mutate_asyncio] diff --git a/samples/snippets/deletes/deletes_test.py b/samples/snippets/deletes/deletes_test.py index bf23daa59..3284c37da 100644 --- a/samples/snippets/deletes/deletes_test.py +++ b/samples/snippets/deletes/deletes_test.py @@ -1,4 +1,5 @@ # Copyright 2020, Google LLC + # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -17,123 +18,116 @@ import time import uuid -from google.cloud import bigtable import pytest -import deletes_snippets +from . import deletes_snippets +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-deletes-{str(uuid.uuid4())[:16]}" -@pytest.fixture(scope="module", autouse=True) +@pytest.fixture(scope="module") def table_id(): from google.cloud.bigtable.row_set import RowSet - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"stats_summary": None, "cell_plan": None}) - - timestamp = datetime.datetime(2019, 5, 1) - timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) - - row_keys = [ - "phone#4c410523#20190501", - "phone#4c410523#20190502", - "phone#4c410523#20190505", - "phone#5c10102#20190501", - "phone#5c10102#20190502", - ] - - rows = [table.direct_row(row_key) for row_key in row_keys] - - rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) - rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) - rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) - rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) - rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) - rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) - rows[3].set_cell("cell_plan", "data_plan_10gb", "true", 
timestamp) - rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) - rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) - - table.mutate_rows(rows) - - # Ensure mutations have propagated. - row_set = RowSet() - - for row_key in row_keys: - row_set.add_row_key(row_key) - - fetched = list(table.read_rows(row_set=row_set)) - - while len(fetched) < len(rows): - time.sleep(5) + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None, "cell_plan": None}, verbose=False) as table: + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) + + row_keys = [ + "phone#4c410523#20190501", + "phone#4c410523#20190502", + "phone#4c410523#20190505", + "phone#5c10102#20190501", + "phone#5c10102#20190502", + ] + + rows = [table.direct_row(row_key) for row_key in row_keys] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) + rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) + rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) + rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + + table.mutate_rows(rows) + + # Ensure mutations have propagated. 
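+        # (the loop below re-reads the five written row keys every 5 seconds until all of them are returned)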
+ row_set = RowSet() + + for row_key in row_keys: + row_set.add_row_key(row_key) + fetched = list(table.read_rows(row_set=row_set)) - yield table_id + while len(fetched) < len(rows): + time.sleep(5) + fetched = list(table.read_rows(row_set=row_set)) + + yield TABLE_ID -def assert_snapshot_match(capsys, snapshot): +def assert_output_match(capsys, expected): out, _ = capsys.readouterr() - snapshot.assert_match(out) + assert out == expected -def test_delete_from_column(capsys, snapshot, table_id): +def test_delete_from_column(capsys, table_id): deletes_snippets.delete_from_column(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_delete_from_column_family(capsys, snapshot, table_id): +def test_delete_from_column_family(capsys, table_id): deletes_snippets.delete_from_column_family(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_delete_from_row(capsys, snapshot, table_id): +def test_delete_from_row(capsys, table_id): deletes_snippets.delete_from_row(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_streaming_and_batching(capsys, snapshot, table_id): +def test_streaming_and_batching(capsys, table_id): deletes_snippets.streaming_and_batching(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_check_and_mutate(capsys, snapshot, table_id): +def test_check_and_mutate(capsys, table_id): deletes_snippets.check_and_mutate(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_drop_row_range(capsys, snapshot, table_id): +def test_drop_row_range(capsys, table_id): deletes_snippets.drop_row_range(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_delete_column_family(capsys, snapshot, table_id): +def test_delete_column_family(capsys, table_id): deletes_snippets.delete_column_family(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_delete_table(capsys, snapshot, table_id): - deletes_snippets.delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) +def test_delete_table(capsys): + delete_table_id = f"to-delete-table-{str(uuid.uuid4())[:16]}" + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, delete_table_id, verbose=False): + deletes_snippets.delete_table(PROJECT, BIGTABLE_INSTANCE, delete_table_id) + assert_output_match(capsys, "") diff --git a/samples/snippets/deletes/noxfile.py b/samples/snippets/deletes/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/snippets/deletes/noxfile.py +++ b/samples/snippets/deletes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/deletes/requirements-test.txt b/samples/snippets/deletes/requirements-test.txt index c4d04a08d..ee4ba0186 100644 --- a/samples/snippets/deletes/requirements-test.txt +++ b/samples/snippets/deletes/requirements-test.txt @@ -1 +1,2 @@ -pytest==7.3.1 +pytest +pytest-asyncio diff --git a/samples/snippets/deletes/requirements.txt b/samples/snippets/deletes/requirements.txt index 200665631..730d25dec 100644 --- a/samples/snippets/deletes/requirements.txt +++ b/samples/snippets/deletes/requirements.txt @@ -1,2 +1 @@ -google-cloud-bigtable==2.17.0 -snapshottest==0.6.0 \ No newline at end of file +google-cloud-bigtable==2.35.0 diff --git a/samples/snippets/deletes/snapshots/snap_deletes_test.py b/samples/snippets/deletes/snapshots/snap_deletes_test.py deleted file mode 100644 index 04a7db940..000000000 --- a/samples/snippets/deletes/snapshots/snap_deletes_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- coding: utf-8 -*- -# snapshottest: v1 - https://goo.gl/zC4yUc -from __future__ import unicode_literals - -from snapshottest import Snapshot - - -snapshots = Snapshot() - -snapshots['test_check_and_mutate 1'] = '' - -snapshots['test_delete_column_family 1'] = '' - -snapshots['test_delete_from_column 1'] = '' - -snapshots['test_delete_from_column_family 1'] = '' - -snapshots['test_delete_from_row 1'] = '' - -snapshots['test_delete_table 1'] = '' - -snapshots['test_drop_row_range 1'] = '' - -snapshots['test_streaming_and_batching 1'] = '' diff --git a/samples/snippets/filters/__init__.py b/samples/snippets/filters/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/snippets/filters/filter_snippets.py b/samples/snippets/filters/filter_snippets.py index 4211378f3..d17c773a4 100644 --- a/samples/snippets/filters/filter_snippets.py +++ b/samples/snippets/filters/filter_snippets.py @@ -13,18 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -# [START bigtable_filters_print] -import datetime - -from google.cloud import bigtable -import google.cloud.bigtable.row_filters as row_filters - -# Write your code here. 
-# [START_EXCLUDE] - # [START bigtable_filters_limit_row_sample] def filter_limit_row_sample(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -37,6 +31,9 @@ def filter_limit_row_sample(project_id, instance_id, table_id): # [END bigtable_filters_limit_row_sample] # [START bigtable_filters_limit_row_regex] def filter_limit_row_regex(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -51,6 +48,9 @@ def filter_limit_row_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_row_regex] # [START bigtable_filters_limit_cells_per_col] def filter_limit_cells_per_col(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -63,6 +63,9 @@ def filter_limit_cells_per_col(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_col] # [START bigtable_filters_limit_cells_per_row] def filter_limit_cells_per_row(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -75,6 +78,9 @@ def filter_limit_cells_per_row(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_row] # [START bigtable_filters_limit_cells_per_row_offset] def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -87,6 +93,9 @@ def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_row_offset] # [START bigtable_filters_limit_col_family_regex] def filter_limit_col_family_regex(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -101,6 +110,9 @@ def filter_limit_col_family_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_family_regex] # [START bigtable_filters_limit_col_qualifier_regex] def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -115,6 +127,9 @@ def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_qualifier_regex] # [START bigtable_filters_limit_col_range] def filter_limit_col_range(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = 
client.instance(instance_id) table = instance.table(table_id) @@ -131,6 +146,9 @@ def filter_limit_col_range(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_range] # [START bigtable_filters_limit_value_range] def filter_limit_value_range(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -148,6 +166,9 @@ def filter_limit_value_range(project_id, instance_id, table_id): def filter_limit_value_regex(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -162,6 +183,10 @@ def filter_limit_value_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_value_regex] # [START bigtable_filters_limit_timestamp_range] def filter_limit_timestamp_range(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + import datetime + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -178,6 +203,9 @@ def filter_limit_timestamp_range(project_id, instance_id, table_id): # [END bigtable_filters_limit_timestamp_range] # [START bigtable_filters_limit_block_all] def filter_limit_block_all(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -190,6 +218,9 @@ def filter_limit_block_all(project_id, instance_id, table_id): # [END bigtable_filters_limit_block_all] # [START bigtable_filters_limit_pass_all] def filter_limit_pass_all(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -202,6 +233,9 @@ def filter_limit_pass_all(project_id, instance_id, table_id): # [END bigtable_filters_limit_pass_all] # [START bigtable_filters_modify_strip_value] def filter_modify_strip_value(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -214,6 +248,9 @@ def filter_modify_strip_value(project_id, instance_id, table_id): # [END bigtable_filters_modify_strip_value] # [START bigtable_filters_modify_apply_label] def filter_modify_apply_label(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -226,6 +263,9 @@ def filter_modify_apply_label(project_id, instance_id, table_id): # [END bigtable_filters_modify_apply_label] # [START bigtable_filters_composing_chain] def filter_composing_chain(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) 
instance = client.instance(instance_id) table = instance.table(table_id) @@ -245,6 +285,9 @@ def filter_composing_chain(project_id, instance_id, table_id): # [END bigtable_filters_composing_chain] # [START bigtable_filters_composing_interleave] def filter_composing_interleave(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -264,6 +307,9 @@ def filter_composing_interleave(project_id, instance_id, table_id): # [END bigtable_filters_composing_interleave] # [START bigtable_filters_composing_condition] def filter_composing_condition(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -285,9 +331,8 @@ def filter_composing_condition(project_id, instance_id, table_id): # [END bigtable_filters_composing_condition] -# [END_EXCLUDE] - +# [START bigtable_filters_print] def print_row(row): print("Reading data for {}:".format(row.row_key.decode("utf-8"))) for cf, cols in sorted(row.cells.items()): diff --git a/samples/snippets/filters/filter_snippets_async.py b/samples/snippets/filters/filter_snippets_async.py new file mode 100644 index 000000000..899d4c5c7 --- /dev/null +++ b/samples/snippets/filters/filter_snippets_async.py @@ -0,0 +1,389 @@ +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
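+# NOTE: every snippet in this file opens BigtableDataClientAsync and the target table as async context managers, so connections are closed automatically when the snippet exits.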
+ + +# [START bigtable_filters_limit_row_sample_asyncio] +async def filter_limit_row_sample(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery(row_filter=row_filters.RowSampleFilter(0.75)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_row_sample_asyncio] +# [START bigtable_filters_limit_row_regex_asyncio] +async def filter_limit_row_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_row_regex_asyncio] +# [START bigtable_filters_limit_cells_per_col_asyncio] +async def filter_limit_cells_per_col(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery(row_filter=row_filters.CellsColumnLimitFilter(2)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_col_asyncio] +# [START bigtable_filters_limit_cells_per_row_asyncio] +async def filter_limit_cells_per_row(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery(row_filter=row_filters.CellsRowLimitFilter(2)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_row_asyncio] +# [START bigtable_filters_limit_cells_per_row_offset_asyncio] +async def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery(row_filter=row_filters.CellsRowOffsetFilter(2)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_row_offset_asyncio] +# [START bigtable_filters_limit_col_family_regex_asyncio] +async def filter_limit_col_family_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_family_regex_asyncio] +# [START bigtable_filters_limit_col_qualifier_regex_asyncio] +async def 
filter_limit_col_qualifier_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.ColumnQualifierRegexFilter( + "connected_.*$".encode("utf-8") + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_qualifier_regex_asyncio] +# [START bigtable_filters_limit_col_range_asyncio] +async def filter_limit_col_range(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.ColumnRangeFilter( + "cell_plan", b"data_plan_01gb", b"data_plan_10gb", inclusive_end=False + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_range_asyncio] +# [START bigtable_filters_limit_value_range_asyncio] +async def filter_limit_value_range(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406") + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_value_range_asyncio] +# [START bigtable_filters_limit_value_regex_asyncio] + + +async def filter_limit_value_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_value_regex_asyncio] +# [START bigtable_filters_limit_timestamp_range_asyncio] +async def filter_limit_timestamp_range(project_id, instance_id, table_id): + import datetime + + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + end = datetime.datetime(2019, 5, 1) + + query = ReadRowsQuery(row_filter=row_filters.TimestampRangeFilter(end=end)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_timestamp_range_asyncio] +# [START bigtable_filters_limit_block_all_asyncio] +async def filter_limit_block_all(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery(row_filter=row_filters.BlockAllFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_block_all_asyncio] +# [START 
bigtable_filters_limit_pass_all_asyncio] +async def filter_limit_pass_all(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery(row_filter=row_filters.PassAllFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_pass_all_asyncio] +# [START bigtable_filters_modify_strip_value_asyncio] +async def filter_modify_strip_value(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery(row_filter=row_filters.StripValueTransformerFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_modify_strip_value_asyncio] +# [START bigtable_filters_modify_apply_label_asyncio] +async def filter_modify_apply_label(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery(row_filter=row_filters.ApplyLabelFilter(label="labelled")) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_modify_apply_label_asyncio] +# [START bigtable_filters_composing_chain_asyncio] +async def filter_composing_chain(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.RowFilterChain( + filters=[ + row_filters.CellsColumnLimitFilter(1), + row_filters.FamilyNameRegexFilter("cell_plan"), + ] + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_composing_chain_asyncio] +# [START bigtable_filters_composing_interleave_asyncio] +async def filter_composing_interleave(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.RowFilterUnion( + filters=[ + row_filters.ValueRegexFilter("true"), + row_filters.ColumnQualifierRegexFilter("os_build"), + ] + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_composing_interleave_asyncio] +# [START bigtable_filters_composing_condition_asyncio] +async def filter_composing_condition(project_id, instance_id, table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) + + query = ReadRowsQuery( + row_filter=row_filters.ConditionalRowFilter( + predicate_filter=row_filters.RowFilterChain( + filters=[ + row_filters.ColumnQualifierRegexFilter("data_plan_10gb"), + row_filters.ValueRegexFilter("true"), + ] + ), + true_filter=row_filters.ApplyLabelFilter(label="passed-filter"), + 
false_filter=row_filters.ApplyLabelFilter(label="filtered-out"), + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_composing_condition_asyncio] + + +def print_row(row): + from google.cloud._helpers import _datetime_from_microseconds + + print("Reading data for {}:".format(row.row_key.decode("utf-8"))) + last_family = None + for cell in row.cells: + if last_family != cell.family: + print("Column Family {}".format(cell.family)) + last_family = cell.family + + labels = " [{}]".format(",".join(cell.labels)) if len(cell.labels) else "" + print( + "\t{}: {} @{}{}".format( + cell.qualifier.decode("utf-8"), + cell.value.decode("utf-8"), + _datetime_from_microseconds(cell.timestamp_micros), + labels, + ) + ) + print("") diff --git a/samples/snippets/filters/filter_snippets_async_test.py b/samples/snippets/filters/filter_snippets_async_test.py new file mode 100644 index 000000000..a3f83a6f2 --- /dev/null +++ b/samples/snippets/filters/filter_snippets_async_test.py @@ -0,0 +1,447 @@ +# Copyright 2020, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import os +import uuid + +import inspect +from typing import AsyncGenerator + +import pytest +import pytest_asyncio +from .snapshots.snap_filters_test import snapshots + +from . 
import filter_snippets_async +from ...utils import create_table_cm +from google.cloud._helpers import ( + _microseconds_from_datetime, +) + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID = f"mobile-time-series-filters-async-{str(uuid.uuid4())[:16]}" + + +@pytest.fixture(scope="module") +def event_loop(): + import asyncio + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() + + +@pytest_asyncio.fixture(scope="module", autouse=True) +async def table_id() -> AsyncGenerator[str, None]: + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None, "cell_plan": None}): + await _populate_table(TABLE_ID) + yield TABLE_ID + + +async def _populate_table(table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + RowMutationEntry, + SetCell, + ) + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) + + async with BigtableDataClientAsync(project=PROJECT) as client: + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + async with table.mutations_batcher() as batcher: + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.003", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "true", + _microseconds_from_datetime(timestamp_minus_hr), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "false", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.004", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190505", + [ + SetCell( + "stats_summary", + "connected_cell", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190401.002", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await
batcher.append( + RowMutationEntry( + "phone#5c10102#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + + +def _datetime_to_micros(value: datetime.datetime) -> int: + """Uses the same conversion rules as the old client (see _microseconds_from_datetime in google.cloud._helpers).""" + import calendar + if not value.tzinfo: + value = value.replace(tzinfo=datetime.timezone.utc) + # Regardless of what timezone is on the value, convert it to UTC. + value = value.astimezone(datetime.timezone.utc) + # Convert the datetime to a microsecond timestamp. + return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond + + +@pytest.mark.asyncio +async def test_filter_limit_row_sample(capsys, table_id): + await filter_snippets_async.filter_limit_row_sample( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + assert "Reading data for" in out + + +@pytest.mark.asyncio +async def test_filter_limit_row_regex(capsys, table_id): + await filter_snippets_async.filter_limit_row_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_col(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_col( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_row(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_row( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_row_offset(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_row_offset( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_col_family_regex(capsys, table_id): + await filter_snippets_async.filter_limit_col_family_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_col_qualifier_regex(capsys, table_id): + await filter_snippets_async.filter_limit_col_qualifier_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_col_range(capsys, table_id): + await filter_snippets_async.filter_limit_col_range( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_value_range(capsys, table_id): + await
filter_snippets_async.filter_limit_value_range( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_value_regex(capsys, table_id): + await filter_snippets_async.filter_limit_value_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_timestamp_range(capsys, table_id): + await filter_snippets_async.filter_limit_timestamp_range( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_block_all(capsys, table_id): + await filter_snippets_async.filter_limit_block_all( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_pass_all(capsys, table_id): + await filter_snippets_async.filter_limit_pass_all( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_modify_strip_value(capsys, table_id): + await filter_snippets_async.filter_modify_strip_value( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_modify_apply_label(capsys, table_id): + await filter_snippets_async.filter_modify_apply_label( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_chain(capsys, table_id): + await filter_snippets_async.filter_composing_chain( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_interleave(capsys, table_id): + await filter_snippets_async.filter_composing_interleave( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_condition(capsys, table_id): + await filter_snippets_async.filter_composing_condition( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected diff --git a/samples/snippets/filters/filters_test.py b/samples/snippets/filters/filters_test.py index 35cf62ff0..fe99886bd 100644 --- a/samples/snippets/filters/filters_test.py +++ b/samples/snippets/filters/filters_test.py @@ -13,215 +13,224 @@ import datetime +import inspect import os import time import uuid -from google.cloud import bigtable import pytest -import filter_snippets - +from . 
import filter_snippets +from .snapshots.snap_filters_test import snapshots +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-filters-{str(uuid.uuid4())[:16]}" @pytest.fixture(scope="module", autouse=True) def table_id(): from google.cloud.bigtable.row_set import RowSet - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"stats_summary": None, "cell_plan": None}) - - timestamp = datetime.datetime(2019, 5, 1) - timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) - - row_keys = [ - "phone#4c410523#20190501", - "phone#4c410523#20190502", - "phone#4c410523#20190505", - "phone#5c10102#20190501", - "phone#5c10102#20190502", - ] - - rows = [table.direct_row(row_key) for row_key in row_keys] - - rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) - rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) - rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) - rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) - rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) - rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) - rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) - rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) - rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) - - table.mutate_rows(rows) - - # Ensure mutations have propagated. 
- row_set = RowSet() - - for row_key in row_keys: - row_set.add_row_key(row_key) - - fetched = list(table.read_rows(row_set=row_set)) - - while len(fetched) < len(rows): - time.sleep(5) + table_id = TABLE_ID + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, table_id, {"stats_summary": None, "cell_plan": None}) as table: + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) + + row_keys = [ + "phone#4c410523#20190501", + "phone#4c410523#20190502", + "phone#4c410523#20190505", + "phone#5c10102#20190501", + "phone#5c10102#20190502", + ] + + rows = [table.direct_row(row_key) for row_key in row_keys] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) + rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) + rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) + rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + + table.mutate_rows(rows) + + # Ensure mutations have propagated. 
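+        # (poll until read_rows returns all written rows before yielding the table to tests)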
+ row_set = RowSet() + + for row_key in row_keys: + row_set.add_row_key(row_key) + fetched = list(table.read_rows(row_set=row_set)) - yield table_id + while len(fetched) < len(rows): + time.sleep(5) + fetched = list(table.read_rows(row_set=row_set)) - table.delete() + yield table_id -def test_filter_limit_row_sample(capsys, snapshot, table_id): +def test_filter_limit_row_sample(capsys, table_id): filter_snippets.filter_limit_row_sample(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() assert "Reading data for" in out -def test_filter_limit_row_regex(capsys, snapshot, table_id): +def test_filter_limit_row_regex(capsys, table_id): filter_snippets.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_cells_per_col(capsys, snapshot, table_id): +def test_filter_limit_cells_per_col(capsys, table_id): filter_snippets.filter_limit_cells_per_col(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_cells_per_row(capsys, snapshot, table_id): +def test_filter_limit_cells_per_row(capsys, table_id): filter_snippets.filter_limit_cells_per_row(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_cells_per_row_offset(capsys, snapshot, table_id): +def test_filter_limit_cells_per_row_offset(capsys, table_id): filter_snippets.filter_limit_cells_per_row_offset( PROJECT, BIGTABLE_INSTANCE, table_id ) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_col_family_regex(capsys, snapshot, table_id): +def test_filter_limit_col_family_regex(capsys, table_id): filter_snippets.filter_limit_col_family_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_col_qualifier_regex(capsys, snapshot, table_id): +def test_filter_limit_col_qualifier_regex(capsys, table_id): filter_snippets.filter_limit_col_qualifier_regex( PROJECT, BIGTABLE_INSTANCE, table_id ) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_col_range(capsys, snapshot, table_id): +def test_filter_limit_col_range(capsys, table_id): filter_snippets.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_value_range(capsys, snapshot, table_id): +def test_filter_limit_value_range(capsys, table_id): filter_snippets.filter_limit_value_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_value_regex(capsys, snapshot, table_id): +def test_filter_limit_value_regex(capsys, table_id): filter_snippets.filter_limit_value_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = 
capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_timestamp_range(capsys, snapshot, table_id): +def test_filter_limit_timestamp_range(capsys, table_id): filter_snippets.filter_limit_timestamp_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_block_all(capsys, snapshot, table_id): +def test_filter_limit_block_all(capsys, table_id): filter_snippets.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_pass_all(capsys, snapshot, table_id): +def test_filter_limit_pass_all(capsys, table_id): filter_snippets.filter_limit_pass_all(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_modify_strip_value(capsys, snapshot, table_id): +def test_filter_modify_strip_value(capsys, table_id): filter_snippets.filter_modify_strip_value(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_modify_apply_label(capsys, snapshot, table_id): +def test_filter_modify_apply_label(capsys, table_id): filter_snippets.filter_modify_apply_label(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_composing_chain(capsys, snapshot, table_id): +def test_filter_composing_chain(capsys, table_id): filter_snippets.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_composing_interleave(capsys, snapshot, table_id): +def test_filter_composing_interleave(capsys, table_id): filter_snippets.filter_composing_interleave(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_composing_condition(capsys, snapshot, table_id): +def test_filter_composing_condition(capsys, table_id): filter_snippets.filter_composing_condition(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected diff --git a/samples/snippets/filters/noxfile.py b/samples/snippets/filters/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/snippets/filters/noxfile.py +++ b/samples/snippets/filters/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/filters/requirements-test.txt b/samples/snippets/filters/requirements-test.txt index c4d04a08d..ee4ba0186 100644 --- a/samples/snippets/filters/requirements-test.txt +++ b/samples/snippets/filters/requirements-test.txt @@ -1 +1,2 @@ -pytest==7.3.1 +pytest +pytest-asyncio diff --git a/samples/snippets/filters/requirements.txt b/samples/snippets/filters/requirements.txt index 200665631..730d25dec 100644 --- a/samples/snippets/filters/requirements.txt +++ b/samples/snippets/filters/requirements.txt @@ -1,2 +1 @@ -google-cloud-bigtable==2.17.0 -snapshottest==0.6.0 \ No newline at end of file +google-cloud-bigtable==2.35.0 diff --git a/samples/snippets/filters/snapshots/snap_filters_test.py b/samples/snippets/filters/snapshots/snap_filters_test.py index a0580f565..2331c93bc 100644 --- a/samples/snippets/filters/snapshots/snap_filters_test.py +++ b/samples/snippets/filters/snapshots/snap_filters_test.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- -# snapshottest: v1 - https://goo.gl/zC4yUc -# flake8: noqa +# this was previously implemented using the `snapshottest` package (https://goo.gl/zC4yUc), +# which is not compatible with Python 3.12. So we moved to a standard dictionary storing +# expected outputs for each test from __future__ import unicode_literals -from snapshottest import Snapshot -snapshots = Snapshot() +snapshots = {} -snapshots['test_filter_limit_row_regex 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_row_regex'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -27,7 +27,7 @@ ''' -snapshots['test_filter_limit_cells_per_col 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_cells_per_col'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -71,7 +71,7 @@ ''' -snapshots['test_filter_limit_cells_per_row 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_cells_per_row'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -102,7 +102,7 @@ ''' -snapshots['test_filter_limit_cells_per_row_offset 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_cells_per_row_offset'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 Column Family stats_summary @@ -132,7 +132,7 @@ ''' -snapshots['test_filter_limit_col_family_regex 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_col_family_regex'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -164,7 +164,7 @@ ''' -snapshots['test_filter_limit_col_qualifier_regex 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_col_qualifier_regex'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -191,7 
+191,7 @@ ''' -snapshots['test_filter_limit_col_range 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_col_range'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -207,7 +207,7 @@ ''' -snapshots['test_filter_limit_value_range 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_value_range'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 @@ -217,7 +217,7 @@ ''' -snapshots['test_filter_limit_value_regex 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_value_regex'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 @@ -239,15 +239,15 @@ ''' -snapshots['test_filter_limit_timestamp_range 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_timestamp_range'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 ''' -snapshots['test_filter_limit_block_all 1'] = '' +snapshots['test_filter_limit_block_all'] = '' -snapshots['test_filter_limit_pass_all 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_pass_all'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -291,7 +291,7 @@ ''' -snapshots['test_filter_modify_strip_value 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_modify_strip_value'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: @2019-04-30 23:00:00+00:00 @@ -335,7 +335,7 @@ ''' -snapshots['test_filter_modify_apply_label 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_modify_apply_label'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 [labelled] \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 [labelled] @@ -379,7 +379,7 @@ ''' -snapshots['test_filter_composing_chain 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_composing_chain'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 @@ -402,7 +402,7 @@ ''' -snapshots['test_filter_composing_interleave 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_composing_interleave'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 \tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 @@ -435,7 +435,7 @@ ''' -snapshots['test_filter_composing_condition 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_composing_condition'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 [filtered-out] \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 [filtered-out] diff --git a/samples/snippets/reads/__init__.py b/samples/snippets/reads/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/snippets/reads/noxfile.py b/samples/snippets/reads/noxfile.py index 7c8a63994..a169b5b5b 
100644 --- a/samples/snippets/reads/noxfile.py +++ b/samples/snippets/reads/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/reads/read_snippets.py b/samples/snippets/reads/read_snippets.py index afd0955b8..210ca73a7 100644 --- a/samples/snippets/reads/read_snippets.py +++ b/samples/snippets/reads/read_snippets.py @@ -13,17 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -# [START bigtable_reads_print] -from google.cloud import bigtable -import google.cloud.bigtable.row_filters as row_filters -from google.cloud.bigtable.row_set import RowSet - -# Write your code here. -# [START_EXCLUDE] - - # [START bigtable_reads_row] def read_row(project_id, instance_id, table_id): + from google.cloud import bigtable + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -38,6 +31,9 @@ def read_row(project_id, instance_id, table_id): # [START bigtable_reads_row_partial] def read_row_partial(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -52,6 +48,9 @@ def read_row_partial(project_id, instance_id, table_id): # [END bigtable_reads_row_partial] # [START bigtable_reads_rows] def read_rows(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -68,6 +67,9 @@ def read_rows(project_id, instance_id, table_id): # [END bigtable_reads_rows] # [START bigtable_reads_row_range] def read_row_range(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -85,6 +87,9 @@ def read_row_range(project_id, instance_id, table_id): # [END bigtable_reads_row_range] # [START bigtable_reads_row_ranges] def read_row_ranges(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -105,6 +110,9 @@ def read_row_ranges(project_id, instance_id, table_id): # [END bigtable_reads_row_ranges] # [START bigtable_reads_prefix] def read_prefix(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -122,6 +130,9 @@ def read_prefix(project_id, instance_id, table_id): # [END bigtable_reads_prefix] # [START bigtable_reads_filter] def read_filter(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters 
+ client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -132,9 +143,8 @@ def read_filter(project_id, instance_id, table_id): # [END bigtable_reads_filter] -# [END_EXCLUDE] - +# [START bigtable_reads_print] def print_row(row): print("Reading data for {}:".format(row.row_key.decode("utf-8"))) for cf, cols in sorted(row.cells.items()): diff --git a/samples/snippets/reads/reads_test.py b/samples/snippets/reads/reads_test.py index 0b61e341f..0078ce598 100644 --- a/samples/snippets/reads/reads_test.py +++ b/samples/snippets/reads/reads_test.py @@ -13,109 +13,105 @@ import datetime import os +import inspect import uuid -from google.cloud import bigtable import pytest -import read_snippets +from .snapshots.snap_reads_test import snapshots +from . import read_snippets +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-reads-{str(uuid.uuid4())[:16]}" @pytest.fixture(scope="module", autouse=True) def table_id(): - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"stats_summary": None}) - - # table = instance.table(table_id) - - timestamp = datetime.datetime(2019, 5, 1) - rows = [ - table.direct_row("phone#4c410523#20190501"), - table.direct_row("phone#4c410523#20190502"), - table.direct_row("phone#4c410523#20190505"), - table.direct_row("phone#5c10102#20190501"), - table.direct_row("phone#5c10102#20190502"), - ] - - rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) - rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) - rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) - rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) - rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) - rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - - table.mutate_rows(rows) - - yield table_id - - table.delete() - - -def test_read_row(capsys, snapshot, table_id): + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None}) as table: + timestamp = datetime.datetime(2019, 5, 1) + rows = [ + table.direct_row("phone#4c410523#20190501"), + table.direct_row("phone#4c410523#20190502"), + table.direct_row("phone#4c410523#20190505"), + table.direct_row("phone#5c10102#20190501"), + table.direct_row("phone#5c10102#20190502"), + ] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", 
"PQ2A.190405.003", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + + table.mutate_rows(rows) + + yield TABLE_ID + + +def test_read_row(capsys, table_id): read_snippets.read_row(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_row_partial(capsys, snapshot, table_id): +def test_read_row_partial(capsys, table_id): read_snippets.read_row_partial(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_rows(capsys, snapshot, table_id): +def test_read_rows(capsys, table_id): read_snippets.read_rows(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_row_range(capsys, snapshot, table_id): +def test_read_row_range(capsys, table_id): read_snippets.read_row_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_row_ranges(capsys, snapshot, table_id): +def test_read_row_ranges(capsys, table_id): read_snippets.read_row_ranges(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_prefix(capsys, snapshot, table_id): +def test_read_prefix(capsys, table_id): read_snippets.read_prefix(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_filter(capsys, snapshot, table_id): +def test_read_filter(capsys, table_id): read_snippets.read_filter(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected diff --git a/samples/snippets/reads/requirements-test.txt b/samples/snippets/reads/requirements-test.txt index c4d04a08d..e079f8a60 100644 --- a/samples/snippets/reads/requirements-test.txt +++ b/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest diff --git a/samples/snippets/reads/requirements.txt b/samples/snippets/reads/requirements.txt index 200665631..730d25dec 100644 --- a/samples/snippets/reads/requirements.txt +++ b/samples/snippets/reads/requirements.txt @@ -1,2 +1 @@ 
-google-cloud-bigtable==2.17.0 -snapshottest==0.6.0 \ No newline at end of file +google-cloud-bigtable==2.35.0 diff --git a/samples/snippets/reads/snapshots/snap_reads_test.py b/samples/snippets/reads/snapshots/snap_reads_test.py index f45e98f2e..564a4df7e 100644 --- a/samples/snippets/reads/snapshots/snap_reads_test.py +++ b/samples/snippets/reads/snapshots/snap_reads_test.py @@ -1,19 +1,18 @@ # -*- coding: utf-8 -*- -# snapshottest: v1 - https://goo.gl/zC4yUc +# this was previously implemented using the `snapshottest` package (https://goo.gl/zC4yUc), +# which is not compatible with Python 3.12. So we moved to a standard dictionary storing +# expected outputs for each test from __future__ import unicode_literals -from snapshottest import Snapshot +snapshots = {} - -snapshots = Snapshot() - -snapshots['test_read_row_partial 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_row_partial'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 ''' -snapshots['test_read_rows 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_rows'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -27,7 +26,7 @@ ''' -snapshots['test_read_row_range 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_row_range'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -47,7 +46,7 @@ ''' -snapshots['test_read_row_ranges 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_row_ranges'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -79,7 +78,7 @@ ''' -snapshots['test_read_prefix 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_prefix'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -111,7 +110,7 @@ ''' -snapshots['test_read_filter 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_filter'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 @@ -133,7 +132,7 @@ ''' -snapshots['test_read_row 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_row'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 diff --git a/samples/snippets/writes/noxfile.py b/samples/snippets/writes/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/snippets/writes/noxfile.py +++ b/samples/snippets/writes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. 
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/writes/requirements-test.txt b/samples/snippets/writes/requirements-test.txt index 96aa71dab..5e15eb26f 100644 --- a/samples/snippets/writes/requirements-test.txt +++ b/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.3.1 +pytest diff --git a/samples/snippets/writes/requirements.txt b/samples/snippets/writes/requirements.txt index 32cead029..54c0c14a3 100644 --- a/samples/snippets/writes/requirements.txt +++ b/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.17.0 \ No newline at end of file +google-cloud-bigtable==2.35.0 \ No newline at end of file diff --git a/samples/snippets/writes/write_batch.py b/samples/snippets/writes/write_batch.py index fd5117242..a583bb713 100644 --- a/samples/snippets/writes/write_batch.py +++ b/samples/snippets/writes/write_batch.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # [START bigtable_writes_batch] -import datetime +from datetime import datetime, timezone from google.cloud import bigtable +from google.cloud.bigtable.batcher import MutationsBatcher def write_batch(project_id, instance_id, table_id): @@ -23,23 +24,21 @@ def write_batch(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - timestamp = datetime.datetime.utcnow() - column_family_id = "stats_summary" + with MutationsBatcher(table=table) as batcher: + timestamp = datetime.now(timezone.utc) + column_family_id = "stats_summary" - rows = [ - table.direct_row("tablet#a0b81f74#20190501"), - table.direct_row("tablet#a0b81f74#20190502"), - ] + rows = [ + table.direct_row("tablet#a0b81f74#20190501"), + table.direct_row("tablet#a0b81f74#20190502"), + ] - rows[0].set_cell(column_family_id, "connected_wifi", 1, timestamp) - rows[0].set_cell(column_family_id, "os_build", "12155.0.0-rc1", timestamp) - rows[1].set_cell(column_family_id, "connected_wifi", 1, timestamp) - rows[1].set_cell(column_family_id, "os_build", "12145.0.0-rc6", timestamp) + rows[0].set_cell(column_family_id, "connected_wifi", 1, timestamp) + rows[0].set_cell(column_family_id, "os_build", "12155.0.0-rc1", timestamp) + rows[1].set_cell(column_family_id, "connected_wifi", 1, timestamp) + rows[1].set_cell(column_family_id, "os_build", "12145.0.0-rc6", timestamp) - response = table.mutate_rows(rows) - for i, status in enumerate(response): - if status.code != 0: - print("Error writing row: {}".format(status.message)) + batcher.mutate_rows(rows) print("Successfully wrote 2 rows.") diff --git a/samples/snippets/writes/write_conditionally.py b/samples/snippets/writes/write_conditionally.py index 7fb640aad..b6f05fba7 100644 --- a/samples/snippets/writes/write_conditionally.py +++ b/samples/snippets/writes/write_conditionally.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
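The write samples in this diff also swap `datetime.datetime.utcnow()`, which returns a naive datetime and is deprecated as of Python 3.12, for the timezone-aware `datetime.now(timezone.utc)`. A quick standalone illustration of the difference:

```python
from datetime import datetime, timezone

naive = datetime.utcnow()           # tzinfo is None; deprecated since Python 3.12
aware = datetime.now(timezone.utc)  # tzinfo is timezone.utc

assert naive.tzinfo is None
assert aware.tzinfo is timezone.utc
```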
# [START bigtable_writes_conditional] -import datetime +from datetime import datetime, timezone from google.cloud import bigtable from google.cloud.bigtable import row_filters @@ -24,7 +24,7 @@ def write_conditional(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - timestamp = datetime.datetime.utcnow() + timestamp = datetime.now(timezone.utc) column_family_id = "stats_summary" row_key = "phone#4c410523#20190501" diff --git a/samples/snippets/writes/write_simple.py b/samples/snippets/writes/write_simple.py index 1aa5a810f..fb7074bc5 100644 --- a/samples/snippets/writes/write_simple.py +++ b/samples/snippets/writes/write_simple.py @@ -14,7 +14,7 @@ # limitations under the License. # [START bigtable_writes_simple] -import datetime +from datetime import datetime, timezone from google.cloud import bigtable @@ -24,7 +24,7 @@ def write_simple(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - timestamp = datetime.datetime.utcnow() + timestamp = datetime.now(timezone.utc) column_family_id = "stats_summary" row_key = "phone#4c410523#20190501" diff --git a/samples/snippets/writes/writes_test.py b/samples/snippets/writes/writes_test.py index 77ae883d6..2c7a3d62b 100644 --- a/samples/snippets/writes/writes_test.py +++ b/samples/snippets/writes/writes_test.py @@ -13,48 +13,27 @@ # limitations under the License. import os -import uuid import backoff from google.api_core.exceptions import DeadlineExceeded -from google.cloud import bigtable import pytest +import uuid from .write_batch import write_batch from .write_conditionally import write_conditional from .write_increment import write_increment from .write_simple import write_simple - +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" - - -@pytest.fixture -def bigtable_client(): - return bigtable.Client(project=PROJECT, admin=True) +TABLE_ID = f"mobile-time-series-writes-{str(uuid.uuid4())[:16]}" @pytest.fixture -def bigtable_instance(bigtable_client): - return bigtable_client.instance(BIGTABLE_INSTANCE) - - -@pytest.fixture -def table_id(bigtable_instance): - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = bigtable_instance.table(table_id) - if table.exists(): - table.delete() - - column_family_id = "stats_summary" - column_families = {column_family_id: None} - table.create(column_families=column_families) - - yield table_id - - table.delete() +def table_id(): + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None}): + yield TABLE_ID def test_writes(capsys, table_id): diff --git a/samples/tableadmin/__init__.py b/samples/tableadmin/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/samples/tableadmin/noxfile.py b/samples/tableadmin/noxfile.py index 7c8a63994..a169b5b5b 100644 --- a/samples/tableadmin/noxfile.py +++ b/samples/tableadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/tableadmin/requirements-test.txt b/samples/tableadmin/requirements-test.txt index ca1f33bd3..f01fd134c 100644 --- a/samples/tableadmin/requirements-test.txt +++ b/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.3.1 -google-cloud-testutils==1.3.3 +pytest +google-cloud-testutils==1.7.0 diff --git a/samples/tableadmin/requirements.txt b/samples/tableadmin/requirements.txt index 909f8c365..730d25dec 100644 --- a/samples/tableadmin/requirements.txt +++ b/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.35.0 diff --git a/samples/tableadmin/tableadmin.py b/samples/tableadmin/tableadmin.py index 7c28601fb..ad00e5788 100644 --- a/samples/tableadmin/tableadmin.py +++ b/samples/tableadmin/tableadmin.py @@ -35,36 +35,7 @@ from google.cloud import bigtable from google.cloud.bigtable import column_family - - -def create_table(project_id, instance_id, table_id): - """Create a Bigtable table - - :type project_id: str - :param project_id: Project id of the client. - - :type instance_id: str - :param instance_id: Instance of the client. - - :type table_id: str - :param table_id: Table id to create table. - """ - - client = bigtable.Client(project=project_id, admin=True) - instance = client.instance(instance_id) - table = instance.table(table_id) - - # Check whether table exists in an instance. - # Create table if it does not exists. - print("Checking if table {} exists...".format(table_id)) - if table.exists(): - print("Table {} already exists.".format(table_id)) - else: - print("Creating the {} table.".format(table_id)) - table.create() - print("Created table {}.".format(table_id)) - - return client, instance, table +from ..utils import create_table_cm def run_table_operations(project_id, instance_id, table_id): @@ -80,154 +51,155 @@ def run_table_operations(project_id, instance_id, table_id): :param table_id: Table id to create table. 
""" - client, instance, table = create_table(project_id, instance_id, table_id) + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + with create_table_cm(project_id, instance_id, table_id, verbose=False) as table: + # [START bigtable_list_tables] + tables = instance.list_tables() + print("Listing tables in current project...") + if tables != []: + for tbl in tables: + print(tbl.table_id) + else: + print("No table exists in current project...") + # [END bigtable_list_tables] + + # [START bigtable_create_family_gc_max_age] + print("Creating column family cf1 with with MaxAge GC Rule...") + # Create a column family with GC policy : maximum age + # where age = current time minus cell timestamp + + # Define the GC rule to retain data with max age of 5 days + max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) + + column_family1 = table.column_family("cf1", max_age_rule) + column_family1.create() + print("Created column family cf1 with MaxAge GC Rule.") + # [END bigtable_create_family_gc_max_age] + + # [START bigtable_create_family_gc_max_versions] + print("Creating column family cf2 with max versions GC rule...") + # Create a column family with GC policy : most recent N versions + # where 1 = most recent version + + # Define the GC policy to retain only the most recent 2 versions + max_versions_rule = column_family.MaxVersionsGCRule(2) + + column_family2 = table.column_family("cf2", max_versions_rule) + column_family2.create() + print("Created column family cf2 with Max Versions GC Rule.") + # [END bigtable_create_family_gc_max_versions] + + # [START bigtable_create_family_gc_union] + print("Creating column family cf3 with union GC rule...") + # Create a column family with GC policy to drop data that matches + # at least one condition. 
+ # Define a GC rule to drop cells older than 5 days or not the + # most recent version + union_rule = column_family.GCRuleUnion( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2), + ] + ) - # [START bigtable_list_tables] - tables = instance.list_tables() - print("Listing tables in current project...") - if tables != []: - for tbl in tables: - print(tbl.table_id) - else: - print("No table exists in current project...") - # [END bigtable_list_tables] - - # [START bigtable_create_family_gc_max_age] - print("Creating column family cf1 with with MaxAge GC Rule...") - # Create a column family with GC policy : maximum age - # where age = current time minus cell timestamp - - # Define the GC rule to retain data with max age of 5 days - max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) - - column_family1 = table.column_family("cf1", max_age_rule) - column_family1.create() - print("Created column family cf1 with MaxAge GC Rule.") - # [END bigtable_create_family_gc_max_age] - - # [START bigtable_create_family_gc_max_versions] - print("Creating column family cf2 with max versions GC rule...") - # Create a column family with GC policy : most recent N versions - # where 1 = most recent version - - # Define the GC policy to retain only the most recent 2 versions - max_versions_rule = column_family.MaxVersionsGCRule(2) - - column_family2 = table.column_family("cf2", max_versions_rule) - column_family2.create() - print("Created column family cf2 with Max Versions GC Rule.") - # [END bigtable_create_family_gc_max_versions] - - # [START bigtable_create_family_gc_union] - print("Creating column family cf3 with union GC rule...") - # Create a column family with GC policy to drop data that matches - # at least one condition. 
- # Define a GC rule to drop cells older than 5 days or not the - # most recent version - union_rule = column_family.GCRuleUnion( - [ - column_family.MaxAgeGCRule(datetime.timedelta(days=5)), - column_family.MaxVersionsGCRule(2), - ] - ) + column_family3 = table.column_family("cf3", union_rule) + column_family3.create() + print("Created column family cf3 with Union GC rule") + # [END bigtable_create_family_gc_union] + + # [START bigtable_create_family_gc_intersection] + print("Creating column family cf4 with Intersection GC rule...") + # Create a column family with GC policy to drop data that matches + # all conditions + # GC rule: Drop cells older than 5 days AND older than the most + # recent 2 versions + intersection_rule = column_family.GCRuleIntersection( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2), + ] + ) - column_family3 = table.column_family("cf3", union_rule) - column_family3.create() - print("Created column family cf3 with Union GC rule") - # [END bigtable_create_family_gc_union] - - # [START bigtable_create_family_gc_intersection] - print("Creating column family cf4 with Intersection GC rule...") - # Create a column family with GC policy to drop data that matches - # all conditions - # GC rule: Drop cells older than 5 days AND older than the most - # recent 2 versions - intersection_rule = column_family.GCRuleIntersection( - [ - column_family.MaxAgeGCRule(datetime.timedelta(days=5)), - column_family.MaxVersionsGCRule(2), - ] - ) + column_family4 = table.column_family("cf4", intersection_rule) + column_family4.create() + print("Created column family cf4 with Intersection GC rule.") + # [END bigtable_create_family_gc_intersection] + + # [START bigtable_create_family_gc_nested] + print("Creating column family cf5 with a Nested GC rule...") + # Create a column family with nested GC policies. + # Create a nested GC rule: + # Drop cells that are either older than the 10 recent versions + # OR + # Drop cells that are older than a month AND older than the + # 2 recent versions + rule1 = column_family.MaxVersionsGCRule(10) + rule2 = column_family.GCRuleIntersection( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=30)), + column_family.MaxVersionsGCRule(2), + ] + ) - column_family4 = table.column_family("cf4", intersection_rule) - column_family4.create() - print("Created column family cf4 with Intersection GC rule.") - # [END bigtable_create_family_gc_intersection] - - # [START bigtable_create_family_gc_nested] - print("Creating column family cf5 with a Nested GC rule...") - # Create a column family with nested GC policies. 
- # Create a nested GC rule: - # Drop cells that are either older than the 10 recent versions - # OR - # Drop cells that are older than a month AND older than the - # 2 recent versions - rule1 = column_family.MaxVersionsGCRule(10) - rule2 = column_family.GCRuleIntersection( - [ - column_family.MaxAgeGCRule(datetime.timedelta(days=30)), - column_family.MaxVersionsGCRule(2), - ] - ) + nested_rule = column_family.GCRuleUnion([rule1, rule2]) + + column_family5 = table.column_family("cf5", nested_rule) + column_family5.create() + print("Created column family cf5 with a Nested GC rule.") + # [END bigtable_create_family_gc_nested] + + # [START bigtable_list_column_families] + print("Printing Column Family and GC Rule for all column families...") + column_families = table.list_column_families() + for column_family_name, gc_rule in sorted(column_families.items()): + print("Column Family:", column_family_name) + print("GC Rule:") + print(gc_rule.to_pb()) + # Sample output: + # Column Family: cf4 + # GC Rule: + # gc_rule { + # intersection { + # rules { + # max_age { + # seconds: 432000 + # } + # } + # rules { + # max_num_versions: 2 + # } + # } + # } + # [END bigtable_list_column_families] + + print("Print column family cf1 GC rule before update...") + print("Column Family: cf1") + print(column_family1.to_pb()) + + # [START bigtable_update_gc_rule] + print("Updating column family cf1 GC rule...") + # Update the column family cf1 to update the GC rule + column_family1 = table.column_family("cf1", column_family.MaxVersionsGCRule(1)) + column_family1.update() + print("Updated column family cf1 GC rule\n") + # [END bigtable_update_gc_rule] + + print("Print column family cf1 GC rule after update...") + print("Column Family: cf1") + print(column_family1.to_pb()) + + # [START bigtable_delete_family] + print("Delete a column family cf2...") + # Delete a column family + column_family2.delete() + print("Column family cf2 deleted successfully.") + # [END bigtable_delete_family] - nested_rule = column_family.GCRuleUnion([rule1, rule2]) - - column_family5 = table.column_family("cf5", nested_rule) - column_family5.create() - print("Created column family cf5 with a Nested GC rule.") - # [END bigtable_create_family_gc_nested] - - # [START bigtable_list_column_families] - print("Printing Column Family and GC Rule for all column families...") - column_families = table.list_column_families() - for column_family_name, gc_rule in sorted(column_families.items()): - print("Column Family:", column_family_name) - print("GC Rule:") - print(gc_rule.to_pb()) - # Sample output: - # Column Family: cf4 - # GC Rule: - # gc_rule { - # intersection { - # rules { - # max_age { - # seconds: 432000 - # } - # } - # rules { - # max_num_versions: 2 - # } - # } - # } - # [END bigtable_list_column_families] - - print("Print column family cf1 GC rule before update...") - print("Column Family: cf1") - print(column_family1.to_pb()) - - # [START bigtable_update_gc_rule] - print("Updating column family cf1 GC rule...") - # Update the column family cf1 to update the GC rule - column_family1 = table.column_family("cf1", column_family.MaxVersionsGCRule(1)) - column_family1.update() - print("Updated column family cf1 GC rule\n") - # [END bigtable_update_gc_rule] - - print("Print column family cf1 GC rule after update...") - print("Column Family: cf1") - print(column_family1.to_pb()) - - # [START bigtable_delete_family] - print("Delete a column family cf2...") - # Delete a column family - column_family2.delete() - print("Column family cf2 deleted 
successfully.") - # [END bigtable_delete_family] - - print( - 'execute command "python tableadmin.py delete [project_id] \ - [instance_id] --table [tableName]" to delete the table.' - ) + print( + 'execute command "python tableadmin.py delete [project_id] \ + [instance_id] --table [tableName]" to delete the table.' + ) def delete_table(project_id, instance_id, table_id): diff --git a/samples/tableadmin/tableadmin_test.py b/samples/tableadmin/tableadmin_test.py index 3063eee9f..0ffdc75c9 100755 --- a/samples/tableadmin/tableadmin_test.py +++ b/samples/tableadmin/tableadmin_test.py @@ -14,29 +14,25 @@ # limitations under the License. import os -import uuid - -from google.api_core import exceptions from test_utils.retry import RetryErrors +from google.api_core import exceptions +import uuid -from tableadmin import create_table -from tableadmin import delete_table -from tableadmin import run_table_operations +from .tableadmin import delete_table +from .tableadmin import run_table_operations +from ..utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_FORMAT = "tableadmin-test-{}" +TABLE_ID = f"tableadmin-test-{str(uuid.uuid4())[:16]}" retry_429_503 = RetryErrors(exceptions.TooManyRequests, exceptions.ServiceUnavailable) def test_run_table_operations(capsys): - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - - retry_429_503(run_table_operations)(PROJECT, BIGTABLE_INSTANCE, table_id) + retry_429_503(run_table_operations)(PROJECT, BIGTABLE_INSTANCE, TABLE_ID) out, _ = capsys.readouterr() - assert "Creating the " + table_id + " table." in out assert "Listing tables in current project." in out assert "Creating column family cf1 with with MaxAge GC Rule" in out assert "Created column family cf1 with MaxAge GC Rule." in out @@ -53,14 +49,11 @@ def test_run_table_operations(capsys): assert "Delete a column family cf2..." in out assert "Column family cf2 deleted successfully." in out - retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id) - def test_delete_table(capsys): - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - retry_429_503(create_table)(PROJECT, BIGTABLE_INSTANCE, table_id) - - retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id) + table_id = f"table-admin-to-delete-{str(uuid.uuid4())[:16]}" + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, table_id, verbose=False): + delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() assert "Table " + table_id + " exists." 
in out diff --git a/samples/testdata/README.md b/samples/testdata/README.md new file mode 100644 index 000000000..57520179f --- /dev/null +++ b/samples/testdata/README.md @@ -0,0 +1,5 @@ +#### To generate singer_pb2.py and descriptors.pb file from singer.proto using `protoc` +```shell +cd samples +protoc --proto_path=testdata/ --include_imports --descriptor_set_out=testdata/descriptors.pb --python_out=testdata/ testdata/singer.proto +``` \ No newline at end of file diff --git a/samples/testdata/descriptors.pb b/samples/testdata/descriptors.pb new file mode 100644 index 000000000..bddf04de3 Binary files /dev/null and b/samples/testdata/descriptors.pb differ diff --git a/samples/testdata/singer.proto b/samples/testdata/singer.proto new file mode 100644 index 000000000..d60e0dfb3 --- /dev/null +++ b/samples/testdata/singer.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package examples.bigtable.music; + +enum Genre { + POP = 0; + JAZZ = 1; + FOLK = 2; + ROCK = 3; +} + +message Singer { + string name = 1; + Genre genre = 2; +} diff --git a/samples/testdata/singer_pb2.py b/samples/testdata/singer_pb2.py new file mode 100644 index 000000000..d2a328df0 --- /dev/null +++ b/samples/testdata/singer_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: singer.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0csinger.proto\x12\x17\x65xamples.bigtable.music\"E\n\x06Singer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x05genre\x18\x02 \x01(\x0e\x32\x1e.examples.bigtable.music.Genre*.\n\x05Genre\x12\x07\n\x03POP\x10\x00\x12\x08\n\x04JAZZ\x10\x01\x12\x08\n\x04\x46OLK\x10\x02\x12\x08\n\x04ROCK\x10\x03\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'singer_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _GENRE._serialized_start=112 + _GENRE._serialized_end=158 + _SINGER._serialized_start=41 + _SINGER._serialized_end=110 +# @@protoc_insertion_point(module_scope) diff --git a/samples/utils.py b/samples/utils.py new file mode 100644 index 000000000..f796aaedb --- /dev/null +++ b/samples/utils.py @@ -0,0 +1,99 @@ +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Provides helper logic used across samples +""" + + +from google.cloud import bigtable +from google.cloud.bigtable.column_family import ColumnFamily +from google.cloud.bigtable_admin_v2.types import ColumnFamily as ColumnFamily_pb +from google.api_core import exceptions +from google.api_core.retry import Retry +from google.api_core.retry import if_exception_type + +delete_retry = Retry(if_exception_type(exceptions.TooManyRequests, exceptions.ServiceUnavailable)) + +class create_table_cm: + """ + Create a new table using a context manager, to ensure that table.delete() is called to clean up + the table, even if an exception is thrown + """ + def __init__(self, *args, verbose=True, **kwargs): + self._args = args + self._kwargs = kwargs + self._verbose = verbose + + def __enter__(self): + self._table = create_table(*self._args, **self._kwargs) + if self._verbose: + print(f"created table: {self._table.table_id}") + return self._table + + def __exit__(self, *args): + if self._table.exists(): + if self._verbose: + print(f"deleting table: {self._table.table_id}") + delete_retry(self._table.delete()) + else: + if self._verbose: + print(f"table {self._table.table_id} not found") + + +def create_table(project, instance_id, table_id, column_families={}): + """ + Creates a new table, and blocks until it reaches a ready state + """ + client = bigtable.Client(project=project, admin=True) + instance = client.instance(instance_id) + + table = instance.table(table_id) + if table.exists(): + table.delete() + + # convert column families to pb if needed + pb_families = { + id: ColumnFamily(id, table, rule).to_pb() if not isinstance(rule, ColumnFamily_pb) else rule + for (id, rule) in column_families.items() + } + + # create table using gapic layer + instance._client.table_admin_client.create_table( + request={ + "parent": instance.name, + "table_id": table_id, + "table": {"column_families": pb_families}, + } + ) + + wait_for_table(table) + + return table + +@Retry( + on_error=if_exception_type( + exceptions.PreconditionFailed, + exceptions.FailedPrecondition, + exceptions.NotFound, + ), + timeout=120, +) +def wait_for_table(table): + """ + raises an exception if the table does not exist or is not ready to use + + Because this method is wrapped with an api_core.Retry decorator, it will + retry with backoff if the table is not ready + """ + if not table.exists(): + raise exceptions.NotFound \ No newline at end of file diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh index 0018b421d..120b0ddc4 100755 --- a/scripts/decrypt-secrets.sh +++ b/scripts/decrypt-secrets.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2023 Google LLC All rights reserved. +# Copyright 2024 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_admin_v2_keywords.py similarity index 83% rename from scripts/fixup_bigtable_admin_v2_keywords.py rename to scripts/fixup_admin_v2_keywords.py index 6882feaf6..d287df24f 100644 --- a/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/scripts/fixup_admin_v2_keywords.py @@ -1,6 +1,6 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -36,40 +36,52 @@ def partition( return results[1], results[0] -class bigtable_adminCallTransformer(cst.CSTTransformer): +class adminCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_consistency': ('name', 'consistency_token', ), + 'check_consistency': ('name', 'consistency_token', 'standard_read_remote_writes', 'data_boost_read_local_writes', ), 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ), 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), + 'create_authorized_view': ('parent', 'authorized_view_id', 'authorized_view', ), 'create_backup': ('parent', 'backup_id', 'backup', ), 'create_cluster': ('parent', 'cluster_id', 'cluster', ), 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), + 'create_logical_view': ('parent', 'logical_view_id', 'logical_view', ), + 'create_materialized_view': ('parent', 'materialized_view_id', 'materialized_view', ), 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), 'delete_app_profile': ('name', 'ignore_warnings', ), + 'delete_authorized_view': ('name', 'etag', ), 'delete_backup': ('name', ), 'delete_cluster': ('name', ), 'delete_instance': ('name', ), + 'delete_logical_view': ('name', 'etag', ), + 'delete_materialized_view': ('name', 'etag', ), 'delete_snapshot': ('name', ), 'delete_table': ('name', ), 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), 'generate_consistency_token': ('name', ), 'get_app_profile': ('name', ), + 'get_authorized_view': ('name', 'view', ), 'get_backup': ('name', ), 'get_cluster': ('name', ), 'get_iam_policy': ('resource', 'options', ), 'get_instance': ('name', ), + 'get_logical_view': ('name', ), + 'get_materialized_view': ('name', ), 'get_snapshot': ('name', ), 'get_table': ('name', 'view', ), 'list_app_profiles': ('parent', 'page_size', 'page_token', ), + 'list_authorized_views': ('parent', 'page_size', 'page_token', 'view', ), 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), 'list_clusters': ('parent', 'page_token', ), 'list_hot_tablets': ('parent', 'start_time', 'end_time', 'page_size', 'page_token', ), 'list_instances': ('parent', 'page_token', ), + 'list_logical_views': ('parent', 'page_size', 'page_token', ), + 'list_materialized_views': ('parent', 'page_size', 'page_token', ), 'list_snapshots': ('parent', 'page_size', 'page_token', ), 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), - 'modify_column_families': ('name', 'modifications', ), + 'modify_column_families': ('name', 'modifications', 'ignore_warnings', ), 'partial_update_cluster': ('cluster', 'update_mask', ), 'partial_update_instance': ('instance', 'update_mask', ), 'restore_table': ('parent', 'table_id', 'backup', ), @@ -78,10 +90,13 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'test_iam_permissions': ('resource', 'permissions', ), 'undelete_table': ('name', ), 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), + 'update_authorized_view': ('authorized_view', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), - 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), - 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ), - 
'update_table': ('table', 'update_mask', ), + 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'node_scaling_factor', 'cluster_config', 'default_storage_type', 'encryption_config', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', 'satisfies_pzi', ), + 'update_logical_view': ('logical_view', 'update_mask', ), + 'update_materialized_view': ('materialized_view', 'update_mask', ), + 'update_table': ('table', 'update_mask', 'ignore_warnings', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: @@ -130,7 +145,7 @@ def fix_files( in_dir: pathlib.Path, out_dir: pathlib.Path, *, - transformer=bigtable_adminCallTransformer(), + transformer=adminCallTransformer(), ): """Duplicate the input dir to the output dir, fixing file method calls. @@ -163,7 +178,7 @@ def fix_files( if __name__ == '__main__': parser = argparse.ArgumentParser( - description="""Fix up source that uses the bigtable_admin client library. + description="""Fix up source that uses the admin client library. The existing sources are NOT overwritten but are copied to output_dir with changes made. diff --git a/scripts/fixup_bigtable_v2_keywords.py b/scripts/fixup_bigtable_v2_keywords.py deleted file mode 100644 index 8d32e5b70..000000000 --- a/scripts/fixup_bigtable_v2_keywords.py +++ /dev/null @@ -1,184 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class bigtableCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), - 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ), - 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), - 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), - 'ping_and_warm': ('name', 'app_profile_id', ), - 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ), - 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ), - 'sample_row_keys': ('table_name', 'app_profile_id', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=bigtableCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. 
- updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the bigtable client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py index 1acc11983..8f5e248a0 100644 --- a/scripts/readme-gen/readme_gen.py +++ b/scripts/readme-gen/readme_gen.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/setup.py b/setup.py index 495730888..c8f13c372 100644 --- a/setup.py +++ b/setup.py @@ -12,6 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# DO NOT EDIT THIS FILE OUTSIDE OF `.librarian/generator-input`
+# The source of truth for this file is `.librarian/generator-input`
+
+
 import io
 import os
 
@@ -37,12 +41,14 @@
 # 'Development Status :: 5 - Production/Stable'
 release_status = "Development Status :: 5 - Production/Stable"
 dependencies = [
-    "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*",
-    "google-cloud-core >= 1.4.4, <3.0.0dev",
-    "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev",
-    "proto-plus >= 1.22.0, <2.0.0dev",
-    "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'",
-    "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
+    "google-api-core[grpc] >= 2.17.0, <3.0.0",
+    "google-cloud-core >= 1.4.4, <3.0.0",
+    "google-auth >= 2.23.0, <3.0.0,!=2.24.0,!=2.25.0",
+    "grpc-google-iam-v1 >= 0.12.4, <1.0.0",
+    "proto-plus >= 1.22.3, <2.0.0",
+    "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'",
+    "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5",
+    "google-crc32c>=1.5.0, <2.0.0dev",
 ]
 
 extras = {"libcst": "libcst >= 0.2.5"}
@@ -59,16 +65,10 @@
 # benchmarks, etc.
 packages = [
     package
-    for package in setuptools.PEP420PackageFinder.find()
+    for package in setuptools.find_namespace_packages()
    if package.startswith("google")
 ]
 
-# Determine which namespaces are needed.
-namespaces = ["google"]
-if "google.cloud" in packages:
-    namespaces.append("google.cloud")
-
-
 setuptools.setup(
     name=name,
     version=version,
@@ -88,18 +88,16 @@
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
         "Programming Language :: Python :: 3.11",
+        "Programming Language :: Python :: 3.12",
+        "Programming Language :: Python :: 3.13",
+        "Programming Language :: Python :: 3.14",
         "Operating System :: OS Independent",
         "Topic :: Internet",
     ],
     platforms="Posix; MacOS X; Windows",
     packages=packages,
-    namespace_packages=namespaces,
     install_requires=dependencies,
     extras_require=extras,
-    scripts=[
-        "scripts/fixup_bigtable_v2_keywords.py",
-        "scripts/fixup_bigtable_admin_v2_keywords.py",
-    ],
     python_requires=">=3.7",
     include_package_data=True,
     zip_safe=False,
diff --git a/test_proxy/README.md b/test_proxy/README.md
new file mode 100644
index 000000000..5c87c729a
--- /dev/null
+++ b/test_proxy/README.md
@@ -0,0 +1,61 @@
+# CBT Python Test Proxy
+
+The CBT test proxy is intended for running conformance tests for the Cloud Bigtable Python Client.
+
+## Option 1: Run Tests with Nox
+
+You can run the conformance tests in a single line by calling `nox -s conformance` from the repo root:
+
+
+```
+cd python-bigtable
+nox -s conformance
+```
+
+## Option 2: Run processes manually
+
+### Start test proxy
+
+You can use `test_proxy.py` to launch a new test proxy process directly:
+
+```
+cd python-bigtable/test_proxy
+python test_proxy.py
+```
+
+The port can be set with the `--port` flag:
+
+```
+cd python-bigtable/test_proxy
+python test_proxy.py --port 8080
+```
+
+By default, the test_proxy targets the async client. You can change this by passing in the `--client_type` flag.
+Valid options are `async`, `sync`, and `legacy`.
+
+```
+python test_proxy.py --client_type=legacy
+```
+
+### Run the test cases
+
+Prerequisites:
+- If you have not already done so, [install golang](https://go.dev/doc/install).
+- Before running tests, [launch an instance of the test proxy](#start-test-proxy)
+in a separate shell session, and make note of the port.
+
+
+Clone and navigate to the Go test library:
+
+```
+git clone https://github.com/googleapis/cloud-bigtable-clients-test.git
+cd cloud-bigtable-clients-test/tests
+```
+
+
+Launch the tests, passing the proxy's port to `-proxy_addr`:
+
+```
+go test -v -proxy_addr=:50055
+```
+
diff --git a/test_proxy/handlers/client_handler_data_async.py b/test_proxy/handlers/client_handler_data_async.py
new file mode 100644
index 000000000..246b7fcd7
--- /dev/null
+++ b/test_proxy/handlers/client_handler_data_async.py
@@ -0,0 +1,301 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This module contains the client handler process for proxy_server.py.
+"""
+import os
+
+from google.cloud.environment_vars import BIGTABLE_EMULATOR
+from google.cloud.bigtable.data import BigtableDataClientAsync
+from google.cloud.bigtable.data._cross_sync import CrossSync
+from helpers import sql_encoding_helpers
+
+# in the generated sync version of this module, reuse the error_safe decorator
+# defined in this (async) source file rather than regenerating it
+if not CrossSync.is_async:
+    from client_handler_data_async import error_safe
+
+__CROSS_SYNC_OUTPUT__ = "test_proxy.handlers.client_handler_data_sync_autogen"
+
+
+@CrossSync.drop
+def error_safe(func):
+    """
+    Catch and pass errors back to the grpc_server_process
+    Also check if client is closed before processing requests
+    """
+
+    async def wrapper(self, *args, **kwargs):
+        try:
+            if self.closed:
+                raise RuntimeError("client is closed")
+            return await func(self, *args, **kwargs)
+        except (Exception, NotImplementedError) as e:
+            # exceptions should be raised in grpc_server_process
+            return encode_exception(e)
+
+    return wrapper
+
+
+@CrossSync.drop
+def encode_exception(exc):
+    """
+    Encode an exception or chain of exceptions to pass back to grpc_handler
+    """
+    from google.api_core.exceptions import GoogleAPICallError
+
+    error_msg = f"{type(exc).__name__}: {exc}"
+    result = {"error": error_msg}
+    if exc.__cause__:
+        result["cause"] = encode_exception(exc.__cause__)
+    if hasattr(exc, "exceptions"):
+        result["subexceptions"] = [encode_exception(e) for e in exc.exceptions]
+    if hasattr(exc, "index"):
+        result["index"] = exc.index
+    if isinstance(exc, GoogleAPICallError):
+        if exc.grpc_status_code is not None:
+            result["code"] = exc.grpc_status_code.value[0]
+        elif exc.code is not None:
+            result["code"] = int(exc.code)
+        else:
+            result["code"] = -1
+    elif result.get("cause", {}).get("code", None):
+        # look for a code in the cause
+        result["code"] = result["cause"]["code"]
+    elif result.get("subexceptions", None):
+        # look for a code in subexceptions
+        for subexc in result["subexceptions"]:
+            if subexc.get("code", None):
+                result["code"] = subexc["code"]
+    return result
+
+
+@CrossSync.convert_class("TestProxyClientHandler")
+class TestProxyClientHandlerAsync:
+    """
+    Implements the same methods as the grpc server, but handles the client
+    library side of the request.
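+    A sync counterpart, TestProxyClientHandler, is generated from this class by
+    CrossSync, using the __CROSS_SYNC_OUTPUT__ path declared above.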
+ + Requests received in TestProxyGrpcServer are converted to a dictionary, + and supplied to the TestProxyClientHandler methods as kwargs. + The client response is then returned back to the TestProxyGrpcServer + """ + + def __init__( + self, + data_target=None, + project_id=None, + instance_id=None, + app_profile_id=None, + per_operation_timeout=None, + **kwargs, + ): + self.closed = False + # use emulator + os.environ[BIGTABLE_EMULATOR] = data_target + self.client = CrossSync.DataClient(project=project_id) + self.instance_id = instance_id + self.app_profile_id = app_profile_id + self.per_operation_timeout = per_operation_timeout + + def close(self): + # TODO: call self.client.close() + self.closed = True + + @error_safe + async def ReadRows(self, request, **kwargs): + table_id = request.pop("table_name").split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result_list = CrossSync.rm_aio(await table.read_rows(request, **kwargs)) + # pack results back into protobuf-parsable format + serialized_response = [row._to_dict() for row in result_list] + return serialized_response + + @error_safe + async def ReadRow(self, row_key, **kwargs): + table_id = kwargs.pop("table_name").split("/")[-1] + app_profile_id = self.app_profile_id or kwargs.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result_row = CrossSync.rm_aio(await table.read_row(row_key, **kwargs)) + # pack results back into protobuf-parsable format + if result_row: + return result_row._to_dict() + else: + return "None" + + @error_safe + async def MutateRow(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import Mutation + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + row_key = request["row_key"] + mutations = [Mutation._from_dict(d) for d in request["mutations"]] + CrossSync.rm_aio(await table.mutate_row(row_key, mutations, **kwargs)) + return "OK" + + @error_safe + async def BulkMutateRows(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import RowMutationEntry + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + entry_list = [ + RowMutationEntry._from_dict(entry) for entry in request["entries"] + ] + CrossSync.rm_aio(await table.bulk_mutate_rows(entry_list, **kwargs)) + return "OK" + + @error_safe + async def CheckAndMutateRow(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import Mutation, SetCell + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", 
self.per_operation_timeout) or 20
+        )
+        row_key = request["row_key"]
+        # add default values for incomplete dicts, so they can still be parsed to objects
+        true_mutations = []
+        for mut_dict in request.get("true_mutations", []):
+            try:
+                true_mutations.append(Mutation._from_dict(mut_dict))
+            except ValueError:
+                # invalid mutation type. Conformance test may be sending generic empty request
+                mutation = SetCell("", "", "", 0)
+                true_mutations.append(mutation)
+        false_mutations = []
+        for mut_dict in request.get("false_mutations", []):
+            try:
+                false_mutations.append(Mutation._from_dict(mut_dict))
+            except ValueError:
+                # invalid mutation type. Conformance test may be sending generic empty request
+                false_mutations.append(SetCell("", "", "", 0))
+        predicate_filter = request.get("predicate_filter", None)
+        result = CrossSync.rm_aio(
+            await table.check_and_mutate_row(
+                row_key,
+                predicate_filter,
+                true_case_mutations=true_mutations,
+                false_case_mutations=false_mutations,
+                **kwargs,
+            )
+        )
+        return result
+
+    @error_safe
+    async def ReadModifyWriteRow(self, request, **kwargs):
+        from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule
+        from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule
+
+        table_id = request["table_name"].split("/")[-1]
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = (
+            kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        )
+        row_key = request["row_key"]
+        rules = []
+        for rule_dict in request.get("rules", []):
+            qualifier = rule_dict["column_qualifier"]
+            if "append_value" in rule_dict:
+                new_rule = AppendValueRule(
+                    rule_dict["family_name"], qualifier, rule_dict["append_value"]
+                )
+            else:
+                new_rule = IncrementRule(
+                    rule_dict["family_name"], qualifier, rule_dict["increment_amount"]
+                )
+            rules.append(new_rule)
+        result = CrossSync.rm_aio(
+            await table.read_modify_write_row(row_key, rules, **kwargs)
+        )
+        # pack results back into protobuf-parsable format
+        if result:
+            return result._to_dict()
+        else:
+            return "None"
+
+    @error_safe
+    async def SampleRowKeys(self, request, **kwargs):
+        table_id = request["table_name"].split("/")[-1]
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = (
+            kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        )
+        result = CrossSync.rm_aio(await table.sample_row_keys(**kwargs))
+        return result
+
+    @error_safe
+    async def ExecuteQuery(self, request, **kwargs):
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        query = request.get("query")
+        params = request.get("params") or {}
+        # Note that the request has been converted to json, and that conversion maps
+        # query param names to snake case. convert_params reverses this conversion. For this
+        # reason, snake case params will have issues if they're used in the conformance tests.
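+        # As an illustration (values are made up), a params dict like
+        #     {"user_id": {"type": {"int64_type": {}}, "int_value": 5}}
+        # comes back from convert_params as roughly
+        #     ({"userId": 5}, {"userId": SqlType.Int64()})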
+ formatted_params, parameter_types = sql_encoding_helpers.convert_params(params) + operation_timeout = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result = CrossSync.rm_aio( + await self.client.execute_query( + query, + self.instance_id, + parameters=formatted_params, + parameter_types=parameter_types, + app_profile_id=app_profile_id, + operation_timeout=operation_timeout, + prepare_operation_timeout=operation_timeout, + ) + ) + rows = CrossSync.rm_aio([r async for r in result]) + md = result.metadata + proto_rows = [] + for r in rows: + vals = [] + for c in md.columns: + vals.append(sql_encoding_helpers.convert_value(c.column_type, r[c.column_name])) + + proto_rows.append({"values": vals}) + + proto_columns = [] + for c in md.columns: + proto_columns.append( + { + "name": c.column_name, + "type": sql_encoding_helpers.convert_type(c.column_type), + } + ) + + return { + "metadata": {"columns": proto_columns}, + "rows": proto_rows, + } diff --git a/test_proxy/handlers/client_handler_data_sync_autogen.py b/test_proxy/handlers/client_handler_data_sync_autogen.py new file mode 100644 index 000000000..0e557f058 --- /dev/null +++ b/test_proxy/handlers/client_handler_data_sync_autogen.py @@ -0,0 +1,226 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is automatically generated by CrossSync. Do not edit manually. + +""" +This module contains the client handler process for proxy_server.py. +""" +import os +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.bigtable.data._cross_sync import CrossSync +from helpers import sql_encoding_helpers +from client_handler_data_async import error_safe + + +class TestProxyClientHandler: + """ + Implements the same methods as the grpc server, but handles the client + library side of the request. + + Requests received in TestProxyGrpcServer are converted to a dictionary, + and supplied to the TestProxyClientHandler methods as kwargs. 
+ The client response is then returned back to the TestProxyGrpcServer + """ + + def __init__( + self, + data_target=None, + project_id=None, + instance_id=None, + app_profile_id=None, + per_operation_timeout=None, + **kwargs + ): + self.closed = False + os.environ[BIGTABLE_EMULATOR] = data_target + self.client = CrossSync._Sync_Impl.DataClient(project=project_id) + self.instance_id = instance_id + self.app_profile_id = app_profile_id + self.per_operation_timeout = per_operation_timeout + + def close(self): + self.closed = True + + @error_safe + async def ReadRows(self, request, **kwargs): + table_id = request.pop("table_name").split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result_list = table.read_rows(request, **kwargs) + serialized_response = [row._to_dict() for row in result_list] + return serialized_response + + @error_safe + async def ReadRow(self, row_key, **kwargs): + table_id = kwargs.pop("table_name").split("/")[-1] + app_profile_id = self.app_profile_id or kwargs.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result_row = table.read_row(row_key, **kwargs) + if result_row: + return result_row._to_dict() + else: + return "None" + + @error_safe + async def MutateRow(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import Mutation + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + row_key = request["row_key"] + mutations = [Mutation._from_dict(d) for d in request["mutations"]] + table.mutate_row(row_key, mutations, **kwargs) + return "OK" + + @error_safe + async def BulkMutateRows(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import RowMutationEntry + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + entry_list = [ + RowMutationEntry._from_dict(entry) for entry in request["entries"] + ] + table.bulk_mutate_rows(entry_list, **kwargs) + return "OK" + + @error_safe + async def CheckAndMutateRow(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import Mutation, SetCell + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + row_key = request["row_key"] + true_mutations = [] + for mut_dict in request.get("true_mutations", []): + try: + true_mutations.append(Mutation._from_dict(mut_dict)) + except ValueError: + mutation = SetCell("", "", "", 0) + true_mutations.append(mutation) + false_mutations = [] + for mut_dict in request.get("false_mutations", []): + try: + 
false_mutations.append(Mutation._from_dict(mut_dict)) + except ValueError: + false_mutations.append(SetCell("", "", "", 0)) + predicate_filter = request.get("predicate_filter", None) + result = table.check_and_mutate_row( + row_key, + predicate_filter, + true_case_mutations=true_mutations, + false_case_mutations=false_mutations, + **kwargs + ) + return result + + @error_safe + async def ReadModifyWriteRow(self, request, **kwargs): + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + row_key = request["row_key"] + rules = [] + for rule_dict in request.get("rules", []): + qualifier = rule_dict["column_qualifier"] + if "append_value" in rule_dict: + new_rule = AppendValueRule( + rule_dict["family_name"], qualifier, rule_dict["append_value"] + ) + else: + new_rule = IncrementRule( + rule_dict["family_name"], qualifier, rule_dict["increment_amount"] + ) + rules.append(new_rule) + result = table.read_modify_write_row(row_key, rules, **kwargs) + if result: + return result._to_dict() + else: + return "None" + + @error_safe + async def SampleRowKeys(self, request, **kwargs): + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result = table.sample_row_keys(**kwargs) + return result + + @error_safe + async def ExecuteQuery(self, request, **kwargs): + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + query = request.get("query") + params = request.get("params") or {} + (formatted_params, parameter_types) = sql_encoding_helpers.convert_params( + params + ) + operation_timeout = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result = self.client.execute_query( + query, + self.instance_id, + parameters=formatted_params, + parameter_types=parameter_types, + app_profile_id=app_profile_id, + operation_timeout=operation_timeout, + prepare_operation_timeout=operation_timeout, + ) + rows = [r for r in result] + md = result.metadata + proto_rows = [] + for r in rows: + vals = [] + for c in md.columns: + vals.append( + sql_encoding_helpers.convert_value(c.column_type, r[c.column_name]) + ) + proto_rows.append({"values": vals}) + proto_columns = [] + for c in md.columns: + proto_columns.append( + { + "name": c.column_name, + "type": sql_encoding_helpers.convert_type(c.column_type), + } + ) + return {"metadata": {"columns": proto_columns}, "rows": proto_rows} diff --git a/test_proxy/handlers/client_handler_legacy.py b/test_proxy/handlers/client_handler_legacy.py new file mode 100644 index 000000000..63fe357b0 --- /dev/null +++ b/test_proxy/handlers/client_handler_legacy.py @@ -0,0 +1,235 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module contains the client handler process for proxy_server.py. +""" +import os + +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.bigtable.client import Client + +import client_handler_data_async as client_handler + +import warnings +warnings.filterwarnings("ignore", category=DeprecationWarning) + + +class LegacyTestProxyClientHandler(client_handler.TestProxyClientHandlerAsync): + + def __init__( + self, + data_target=None, + project_id=None, + instance_id=None, + app_profile_id=None, + per_operation_timeout=None, + **kwargs, + ): + self.closed = False + # use emulator + os.environ[BIGTABLE_EMULATOR] = data_target + self.client = Client(project=project_id) + self.instance_id = instance_id + self.app_profile_id = app_profile_id + self.per_operation_timeout = per_operation_timeout + + def close(self): + self.closed = True + + @client_handler.error_safe + async def ReadRows(self, request, **kwargs): + table_id = request["table_name"].split("/")[-1] + # app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + + limit = request.get("rows_limit", None) + start_key = request.get("rows", {}).get("row_keys", [None])[0] + end_key = request.get("rows", {}).get("row_keys", [None])[-1] + end_inclusive = request.get("rows", {}).get("row_ranges", [{}])[-1].get("end_key_closed", True) + + row_list = [] + for row in table.read_rows(start_key=start_key, end_key=end_key, limit=limit, end_inclusive=end_inclusive): + # parse results into proto formatted dict + dict_val = {"row_key": row.row_key} + for family, family_cells in row.cells.items(): + family_dict = {"name": family} + for qualifier, qualifier_cells in family_cells.items(): + column_dict = {"qualifier": qualifier} + for cell in qualifier_cells: + cell_dict = { + "value": cell.value, + "timestamp_micros": cell.timestamp.timestamp() * 1000000, + "labels": cell.labels, + } + column_dict.setdefault("cells", []).append(cell_dict) + family_dict.setdefault("columns", []).append(column_dict) + dict_val.setdefault("families", []).append(family_dict) + row_list.append(dict_val) + return row_list + + @client_handler.error_safe + async def ReadRow(self, row_key, **kwargs): + table_id = kwargs["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + + row = table.read_row(row_key) + # parse results into proto formatted dict + dict_val = {"row_key": row.row_key} + for family, family_cells in row.cells.items(): + family_dict = {"name": family} + for qualifier, qualifier_cells in family_cells.items(): + column_dict = {"qualifier": qualifier} + for cell in qualifier_cells: + cell_dict = { + "value": cell.value, + "timestamp_micros": cell.timestamp.timestamp() * 1000000, + "labels": cell.labels, + } + column_dict.setdefault("cells", []).append(cell_dict) + family_dict.setdefault("columns", []).append(column_dict) + dict_val.setdefault("families", []).append(family_dict) + return dict_val + + @client_handler.error_safe + async def MutateRow(self, 
request, **kwargs): + from datetime import datetime + from google.cloud.bigtable.row import DirectRow + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + row_key = request["row_key"] + new_row = DirectRow(row_key, table) + for m_dict in request.get("mutations", []): + details = m_dict.get("set_cell") or m_dict.get("delete_from_column") or m_dict.get("delete_from_family") or m_dict.get("delete_from_row") + timestamp = datetime.fromtimestamp(details.get("timestamp_micros")) if details.get("timestamp_micros") else None + if m_dict.get("set_cell"): + new_row.set_cell(details["family_name"], details["column_qualifier"], details["value"], timestamp=timestamp) + elif m_dict.get("delete_from_column"): + new_row.delete_cell(details["family_name"], details["column_qualifier"], timestamp=timestamp) + elif m_dict.get("delete_from_family"): + new_row.delete_cells(details["family_name"], timestamp=timestamp) + elif m_dict.get("delete_from_row"): + new_row.delete() + table.mutate_rows([new_row]) + return "OK" + + @client_handler.error_safe + async def BulkMutateRows(self, request, **kwargs): + from google.cloud.bigtable.row import DirectRow + from datetime import datetime + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + rows = [] + for entry in request.get("entries", []): + row_key = entry["row_key"] + new_row = DirectRow(row_key, table) + for m_dict in entry.get("mutations"): + details = m_dict.get("set_cell") or m_dict.get("delete_from_column") or m_dict.get("delete_from_family") or m_dict.get("delete_from_row") + timestamp = datetime.fromtimestamp(details.get("timestamp_micros")) if details.get("timestamp_micros") else None + if m_dict.get("set_cell"): + new_row.set_cell(details["family_name"], details["column_qualifier"], details["value"], timestamp=timestamp) + elif m_dict.get("delete_from_column"): + new_row.delete_cell(details["family_name"], details["column_qualifier"], timestamp=timestamp) + elif m_dict.get("delete_from_family"): + new_row.delete_cells(details["family_name"], timestamp=timestamp) + elif m_dict.get("delete_from_row"): + new_row.delete() + rows.append(new_row) + table.mutate_rows(rows) + return "OK" + + @client_handler.error_safe + async def CheckAndMutateRow(self, request, **kwargs): + from google.cloud.bigtable.row import ConditionalRow + from google.cloud.bigtable.row_filters import PassAllFilter + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + + predicate_filter = request.get("predicate_filter", PassAllFilter(True)) + new_row = ConditionalRow(request["row_key"], table, predicate_filter) + + combined_mutations = [{"state": True, **m} for m in request.get("true_mutations", [])] + combined_mutations.extend([{"state": False, **m} for m in request.get("false_mutations", [])]) + for mut_dict in combined_mutations: + if "set_cell" in mut_dict: + details = mut_dict["set_cell"] + new_row.set_cell( + details.get("family_name", ""), + details.get("column_qualifier", ""), + details.get("value", ""), + timestamp=details.get("timestamp_micros", None), + state=mut_dict["state"], + ) + elif "delete_from_column" in mut_dict: + details = mut_dict["delete_from_column"] + new_row.delete_cell( + details.get("family_name", ""), + details.get("column_qualifier", ""), + timestamp=details.get("timestamp_micros", None), + 
state=mut_dict["state"], + ) + elif "delete_from_family" in mut_dict: + details = mut_dict["delete_from_family"] + new_row.delete_cells( + details.get("family_name", ""), + timestamp=details.get("timestamp_micros", None), + state=mut_dict["state"], + ) + elif "delete_from_row" in mut_dict: + new_row.delete(state=mut_dict["state"]) + else: + raise RuntimeError(f"Unknown mutation type: {mut_dict}") + return new_row.commit() + + @client_handler.error_safe + async def ReadModifyWriteRow(self, request, **kwargs): + from google.cloud.bigtable.row import AppendRow + from google.cloud._helpers import _microseconds_from_datetime + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + row_key = request["row_key"] + new_row = AppendRow(row_key, table) + for rule_dict in request.get("rules", []): + qualifier = rule_dict["column_qualifier"] + family = rule_dict["family_name"] + if "append_value" in rule_dict: + new_row.append_cell_value(family, qualifier, rule_dict["append_value"]) + else: + new_row.increment_cell_value(family, qualifier, rule_dict["increment_amount"]) + raw_result = new_row.commit() + result_families = [] + for family, column_dict in raw_result.items(): + result_columns = [] + for column, cell_list in column_dict.items(): + result_cells = [] + for cell_tuple in cell_list: + cell_dict = {"value": cell_tuple[0], "timestamp_micros": _microseconds_from_datetime(cell_tuple[1])} + result_cells.append(cell_dict) + result_columns.append({"qualifier": column, "cells": result_cells}) + result_families.append({"name": family, "columns": result_columns}) + return {"key": row_key, "families": result_families} + + @client_handler.error_safe + async def SampleRowKeys(self, request, **kwargs): + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + response = list(table.sample_row_keys()) + tuple_response = [(s.row_key, s.offset_bytes) for s in response] + return tuple_response diff --git a/test_proxy/handlers/grpc_handler.py b/test_proxy/handlers/grpc_handler.py new file mode 100644 index 000000000..28ae19cf9 --- /dev/null +++ b/test_proxy/handlers/grpc_handler.py @@ -0,0 +1,210 @@ +import time + +import test_proxy_pb2 +import test_proxy_pb2_grpc +import data_pb2 +import bigtable_pb2 +from google.rpc.status_pb2 import Status +from google.protobuf import json_format + + +def correct_cancelled(status): + """ + Deadline exceeded errors are a race between client side cancellation and server + side deadline exceeded. For the purpose of these tests, the client will never cancel, + so we adjust cancelled errors to deadline_exceeded for consistency. + """ + if status.code == 1: + return Status(code=4, message="deadlineexceeded") + return status + + +class TestProxyGrpcServer(test_proxy_pb2_grpc.CloudBigtableV2TestProxyServicer): + """ + Implements a grpc server that proxies conformance test requests to the client library + + Due to issues with using protoc-compiled protos and client-library + proto-plus objects in the same process, this server defers requests to + matching methods in a TestProxyClientHandler instance in a separate + process. 
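+    Requests and responses are exchanged with that process over shared queues
+    (request_q and the per-request queue_pool).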
+    This delegation happens invisibly in the decorator @delegate_to_client_handler,
+    with the results attached to each request as a client_response kwarg
+    """
+
+    def __init__(self, request_q, queue_pool):
+        self.open_queues = list(range(len(queue_pool)))
+        self.queue_pool = queue_pool
+        self.request_q = request_q
+
+    def delegate_to_client_handler(func, timeout_seconds=300):
+        """
+        Decorator that transparently passes a request to the client
+        handler process, and then attaches the response to the wrapped call
+        """
+
+        def wrapper(self, request, context, **kwargs):
+            deadline = time.time() + timeout_seconds
+            json_dict = json_format.MessageToDict(request)
+            out_idx = self.open_queues.pop()
+            json_dict["proxy_request"] = func.__name__
+            json_dict["response_queue_idx"] = out_idx
+            out_q = self.queue_pool[out_idx]
+            self.request_q.put(json_dict)
+            # wait for response
+            while time.time() < deadline:
+                if not out_q.empty():
+                    response = out_q.get()
+                    self.open_queues.append(out_idx)
+                    if isinstance(response, Exception):
+                        raise response
+                    else:
+                        return func(
+                            self,
+                            request,
+                            context,
+                            client_response=response,
+                            **kwargs,
+                        )
+                time.sleep(1e-4)
+
+        return wrapper
+
+    @delegate_to_client_handler
+    def CreateClient(self, request, context, client_response=None):
+        return test_proxy_pb2.CreateClientResponse()
+
+    @delegate_to_client_handler
+    def CloseClient(self, request, context, client_response=None):
+        return test_proxy_pb2.CloseClientResponse()
+
+    @delegate_to_client_handler
+    def RemoveClient(self, request, context, client_response=None):
+        return test_proxy_pb2.RemoveClientResponse()
+
+    @delegate_to_client_handler
+    def ReadRows(self, request, context, client_response=None):
+        status = Status()
+        rows = []
+        if isinstance(client_response, dict) and "error" in client_response:
+            status = correct_cancelled(Status(code=5, message=client_response["error"]))
+        else:
+            rows = [data_pb2.Row(**d) for d in client_response]
+        result = test_proxy_pb2.RowsResult(rows=rows, status=status)
+        return result
+
+    @delegate_to_client_handler
+    def ReadRow(self, request, context, client_response=None):
+        status = Status()
+        row = None
+        if isinstance(client_response, dict) and "error" in client_response:
+            status = correct_cancelled(
+                Status(
+                    code=client_response.get("code", 5),
+                    message=client_response.get("error"),
+                )
+            )
+        elif client_response != "None":
+            row = data_pb2.Row(**client_response)
+        result = test_proxy_pb2.RowResult(row=row, status=status)
+        return result
+
+    @delegate_to_client_handler
+    def MutateRow(self, request, context, client_response=None):
+        status = Status()
+        if isinstance(client_response, dict) and "error" in client_response:
+            status = correct_cancelled(
+                Status(
+                    code=client_response.get("code", 5),
+                    message=client_response["error"],
+                )
+            )
+        return test_proxy_pb2.MutateRowResult(status=status)
+
+    @delegate_to_client_handler
+    def BulkMutateRows(self, request, context, client_response=None):
+        status = Status()
+        entries = []
+        if isinstance(client_response, dict) and "error" in client_response:
+            entries = [
+                bigtable_pb2.MutateRowsResponse.Entry(
+                    index=exc_dict.get("index", 1),
+                    status=correct_cancelled(Status(code=exc_dict.get("code", 5))),
+                )
+                for exc_dict in client_response.get("subexceptions", [])
+            ]
+            status = correct_cancelled(
+                Status(
+                    code=client_response.get("code", 5),
+                    message=client_response["error"],
+                )
+            )
+        response = test_proxy_pb2.MutateRowsResult(status=status, entries=entries)
+        return response
+
+    @delegate_to_client_handler
+    def 
CheckAndMutateRow(self, request, context, client_response=None): + if isinstance(client_response, dict) and "error" in client_response: + status = correct_cancelled( + Status( + code=client_response.get("code", 5), + message=client_response["error"], + ) + ) + response = test_proxy_pb2.CheckAndMutateRowResult(status=status) + else: + result = bigtable_pb2.CheckAndMutateRowResponse( + predicate_matched=client_response + ) + response = test_proxy_pb2.CheckAndMutateRowResult( + result=result, status=Status() + ) + return response + + @delegate_to_client_handler + def ReadModifyWriteRow(self, request, context, client_response=None): + status = Status() + row = None + if isinstance(client_response, dict) and "error" in client_response: + status = correct_cancelled( + Status( + code=client_response.get("code", 5), + message=client_response.get("error"), + ) + ) + elif client_response != "None": + row = data_pb2.Row(**client_response) + result = test_proxy_pb2.RowResult(row=row, status=status) + return result + + @delegate_to_client_handler + def SampleRowKeys(self, request, context, client_response=None): + status = Status() + sample_list = [] + if isinstance(client_response, dict) and "error" in client_response: + status = correct_cancelled( + Status( + code=client_response.get("code", 5), + message=client_response.get("error"), + ) + ) + else: + for sample in client_response: + sample_list.append( + bigtable_pb2.SampleRowKeysResponse( + offset_bytes=sample[1], row_key=sample[0] + ) + ) + return test_proxy_pb2.SampleRowKeysResult(status=status, samples=sample_list) + + @delegate_to_client_handler + def ExecuteQuery(self, request, context, client_response=None): + if isinstance(client_response, dict) and "error" in client_response: + return test_proxy_pb2.ExecuteQueryResult( + status=correct_cancelled( + Status(code=client_response.get("code", 13), message=client_response["error"]) + ) + ) + else: + return test_proxy_pb2.ExecuteQueryResult( + metadata=client_response["metadata"], rows=client_response["rows"] + ) diff --git a/test_proxy/handlers/helpers/sql_encoding_helpers.py b/test_proxy/handlers/helpers/sql_encoding_helpers.py new file mode 100644 index 000000000..9640ae3fd --- /dev/null +++ b/test_proxy/handlers/helpers/sql_encoding_helpers.py @@ -0,0 +1,183 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module contains helpers for handling sql data types for the test proxy. 
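+
+Because the test proxy exchanges messages as json-style dicts rather than proto
+objects, these helpers operate on dict representations of the Value and Type
+protos.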
+""" +from datetime import date +from typing import Any + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.cloud.bigtable.data.execute_query.metadata import SqlType + + +PRIMITIVE_TYPE_MAPPING = { + "bytes_type": SqlType.Bytes(), + "string_type": SqlType.String(), + "int64_type": SqlType.Int64(), + "float32_type": SqlType.Float32(), + "float64_type": SqlType.Float64(), + "bool_type": SqlType.Bool(), + "timestamp_type": SqlType.Timestamp(), + "date_type": SqlType.Date(), +} + +PRIMITIVE_VALUE_FIELDS = [ + "bytes_value", + "string_value", + "int_value", + "float_value", + "bool_value", +] + + +def snake_to_camel(snake_string): + """ + Used to convert query parameter names back to camel case. This needs to be handled + specifically because the python test proxy converts all keys to snake case when it + converts proto messages to dicts. + """ + components = snake_string.split("_") + return components[0] + "".join(x.title() for x in components[1:]) + + +def convert_value(type: SqlType, val: Any): + """ + Converts python value to a dict representation of a protobuf Value message. + """ + if val is None: + return {} + elif isinstance(type, SqlType.Date): + return {"date_value": val} + elif isinstance(type, SqlType.Map): + key_type = type.key_type + val_type = type.value_type + results = [] + for k, v in val.items(): + results.append( + { + "array_value": { + "values": [ + convert_value(key_type, k), + convert_value(val_type, v), + ] + } + } + ) + return {"array_value": {"values": results}} + elif isinstance(type, SqlType.Struct): + results = [] + for i, (_, field_val) in enumerate(val.fields): + results.append(convert_value(type[i], field_val)) + return {"array_value": {"values": results}} + elif isinstance(type, SqlType.Array): + elem_type = type.element_type + results = [] + for e in val: + results.append(convert_value(elem_type, e)) + return {"array_value": {"values": results}} + else: + return type._to_value_pb_dict(val) + + +def convert_type(type: SqlType): + if isinstance(type, SqlType.Map): + return { + "map_type": { + "key_type": convert_type(type.key_type), + "value_type": convert_type(type.value_type), + } + } + elif isinstance(type, SqlType.Struct): + fields = [] + for field_name, field_type in type.fields: + fields.append({"field_name": field_name, "type": convert_type(field_type)}) + return {"struct_type": {"fields": fields}} + elif isinstance(type, SqlType.Array): + return {"array_type": {"element_type": convert_type(type.element_type)}} + else: + return type._to_type_pb_dict() + + +def to_sql_type(proto_type_dict): + if len(proto_type_dict.keys()) != 1: + raise ValueError("Invalid type: ", proto_type_dict) + type_field = list(proto_type_dict.keys())[0] + if type_field in PRIMITIVE_TYPE_MAPPING: + return PRIMITIVE_TYPE_MAPPING[type_field] + elif type_field == "array_type": + elem_type_dict = proto_type_dict["array_type"]["element_type"] + return SqlType.Array(to_sql_type(elem_type_dict)) + else: + raise ValueError("Invalid query parameter type: ", proto_type_dict) + + +def convert_to_python_value(proto_val: Any, sql_type: SqlType): + """ + Converts the given dict representation of a proto Value message to the correct + python value. This is used to convert query params to the represetation expected + from users. We can't reuse existing parsers because they expect actual proto messages + rather than dicts. 
+ """ + value_field = sql_type.value_pb_dict_field_name + if isinstance(sql_type, SqlType.Array): + if "array_value" not in proto_val: + return None + elem_type = sql_type.element_type + return [ + convert_to_python_value(v, elem_type) + for v in proto_val["array_value"]["values"] + ] + if value_field and value_field not in proto_val: + return None + if value_field in PRIMITIVE_VALUE_FIELDS: + return proto_val[value_field] + if isinstance(sql_type, SqlType.Timestamp): + if "timestamp_value" not in proto_val: + return None + return DatetimeWithNanoseconds.from_rfc3339(proto_val["timestamp_value"]) + if isinstance(sql_type, SqlType.Date): + if "date_value" not in proto_val: + return None + return date( + year=proto_val["date_value"]["year"], + month=proto_val["date_value"]["month"], + day=proto_val["date_value"]["day"], + ) + raise ValueError("Unexpected parameter: %s, %s", proto_val, sql_type) + + +def convert_params(request_params): + """ + Converts the given dictionary of parameters to a python representation. + This converts parameter names from snake to camel case and protobuf Value dicts + to python values. + """ + python_params = {} + param_types = {} + for param_key, param_value in request_params.items(): + if "type" not in param_value: + raise ValueError("type must be set for query params") + + sql_type = to_sql_type(param_value["type"]) + readjusted_param_name = snake_to_camel(param_key) + param_types[readjusted_param_name] = sql_type + if len(param_value.keys()) == 1: + # this means type is set and nothing else + python_params[readjusted_param_name] = None + elif len(param_value) > 2: + raise ValueError("Unexpected Value format: ", param_value) + python_params[readjusted_param_name] = convert_to_python_value( + param_value, sql_type + ) + return python_params, param_types diff --git a/test_proxy/protos/bigtable_pb2.py b/test_proxy/protos/bigtable_pb2.py new file mode 100644 index 000000000..edc90c3ec --- /dev/null +++ b/test_proxy/protos/bigtable_pb2.py @@ -0,0 +1,215 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: google/bigtable/v2/bigtable.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'google/bigtable/v2/bigtable.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.api import routing_pb2 as google_dot_api_dot_routing__pb2 +import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +import request_stats_pb2 as google_dot_bigtable_dot_v2_dot_request__stats__pb2 +import types_pb2 as google_dot_bigtable_dot_v2_dot_types__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x18google/api/routing.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a&google/bigtable/v2/request_stats.proto\x1a\x1egoogle/bigtable/v2/types.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xcc\x04\n\x0fReadRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\t \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12U\n\x16materialized_view_name\x18\x0b \x01(\tB5\xe0\x41\x01\xfa\x41/\n-bigtableadmin.googleapis.com/MaterializedView\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\x12P\n\x12request_stats_view\x18\x06 \x01(\x0e\x32\x34.google.bigtable.v2.ReadRowsRequest.RequestStatsView\x12\x10\n\x08reversed\x18\x07 \x01(\x08\"f\n\x10RequestStatsView\x12\"\n\x1eREQUEST_STATS_VIEW_UNSPECIFIED\x10\x00\x12\x16\n\x12REQUEST_STATS_NONE\x10\x01\x12\x16\n\x12REQUEST_STATS_FULL\x10\x02\"\xb1\x03\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x12\x37\n\rrequest_stats\x18\x03 \x01(\x0b\x32 .google.bigtable.v2.RequestStats\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 
\x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"\x98\x02\n\x14SampleRowKeysRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x04 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12U\n\x16materialized_view_name\x18\x05 \x01(\tB5\xe0\x41\x01\xfa\x41/\n-bigtableadmin.googleapis.com/MaterializedView\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\x89\x02\n\x10MutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x06 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x13\n\x11MutateRowResponse\"\xd1\x02\n\x11MutateRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x05 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\xe4\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x12?\n\x0frate_limit_info\x18\x03 \x01(\x0b\x32!.google.bigtable.v2.RateLimitInfoH\x00\x88\x01\x01\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.StatusB\x12\n\x10_rate_limit_info\"J\n\rRateLimitInfo\x12)\n\x06period\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0e\n\x06\x66\x61\x63tor\x18\x02 \x01(\x01\"\x81\x03\n\x18\x43heckAndMutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\t \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"i\n\x12PingAndWarmRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"\x15\n\x13PingAndWarmResponse\"\x99\x02\n\x19ReadModifyWriteRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x06 
\x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"\x86\x01\n,GenerateInitialChangeStreamPartitionsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"g\n-GenerateInitialChangeStreamPartitionsResponse\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\"\x9b\x03\n\x17ReadChangeStreamRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x36\n\tpartition\x18\x03 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\x30\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12K\n\x13\x63ontinuation_tokens\x18\x06 \x01(\x0b\x32,.google.bigtable.v2.StreamContinuationTokensH\x00\x12,\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12heartbeat_duration\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\n\nstart_from\"\xa9\n\n\x18ReadChangeStreamResponse\x12N\n\x0b\x64\x61ta_change\x18\x01 \x01(\x0b\x32\x37.google.bigtable.v2.ReadChangeStreamResponse.DataChangeH\x00\x12K\n\theartbeat\x18\x02 \x01(\x0b\x32\x36.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatH\x00\x12P\n\x0c\x63lose_stream\x18\x03 \x01(\x0b\x32\x38.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamH\x00\x1a\xf4\x01\n\rMutationChunk\x12X\n\nchunk_info\x18\x01 \x01(\x0b\x32\x44.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo\x12.\n\x08mutation\x18\x02 \x01(\x0b\x32\x1c.google.bigtable.v2.Mutation\x1aY\n\tChunkInfo\x12\x1a\n\x12\x63hunked_value_size\x18\x01 \x01(\x05\x12\x1c\n\x14\x63hunked_value_offset\x18\x02 \x01(\x05\x12\x12\n\nlast_chunk\x18\x03 \x01(\x08\x1a\xc6\x03\n\nDataChange\x12J\n\x04type\x18\x01 \x01(\x0e\x32<.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type\x12\x19\n\x11source_cluster_id\x18\x02 \x01(\t\x12\x0f\n\x07row_key\x18\x03 \x01(\x0c\x12\x34\n\x10\x63ommit_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\ntiebreaker\x18\x05 \x01(\x05\x12J\n\x06\x63hunks\x18\x06 \x03(\x0b\x32:.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\r\n\x05token\x18\t \x01(\t\x12;\n\x17\x65stimated_low_watermark\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"P\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04USER\x10\x01\x12\x16\n\x12GARBAGE_COLLECTION\x10\x02\x12\x10\n\x0c\x43ONTINUATION\x10\x03\x1a\x91\x01\n\tHeartbeat\x12G\n\x12\x63ontinuation_token\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x17\x65stimated_low_watermark\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\xb8\x01\n\x0b\x43loseStream\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12H\n\x13\x63ontinuation_tokens\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x0enew_partitions\x18\x03 \x03(\x0b\x32#.google.bigtable.v2.StreamPartitionB\x0f\n\rstream_record\"\xa1\x03\n\x13\x45xecuteQueryRequest\x12\x44\n\rinstance_name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 
\x01(\tB\x03\xe0\x41\x01\x12\x14\n\x05query\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x16\n\x0eprepared_query\x18\t \x01(\x0c\x12;\n\x0cproto_format\x18\x04 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoFormatB\x02\x18\x01H\x00\x12\x19\n\x0cresume_token\x18\x08 \x01(\x0c\x42\x03\xe0\x41\x01\x12H\n\x06params\x18\x07 \x03(\x0b\x32\x33.google.bigtable.v2.ExecuteQueryRequest.ParamsEntryB\x03\xe0\x41\x02\x1aH\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.google.bigtable.v2.Value:\x02\x38\x01\x42\r\n\x0b\x64\x61ta_format\"\x96\x01\n\x14\x45xecuteQueryResponse\x12\x39\n\x08metadata\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.ResultSetMetadataH\x00\x12\x37\n\x07results\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.PartialResultSetH\x00\x42\n\n\x08response\"\xf4\x02\n\x13PrepareQueryRequest\x12\x44\n\rinstance_name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x12\n\x05query\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x0cproto_format\x18\x04 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoFormatH\x00\x12Q\n\x0bparam_types\x18\x06 \x03(\x0b\x32\x37.google.bigtable.v2.PrepareQueryRequest.ParamTypesEntryB\x03\xe0\x41\x02\x1aK\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type:\x02\x38\x01\x42\r\n\x0b\x64\x61ta_format\"\x98\x01\n\x14PrepareQueryResponse\x12\x37\n\x08metadata\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.ResultSetMetadata\x12\x16\n\x0eprepared_query\x18\x02 \x01(\x0c\x12/\n\x0bvalid_until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xc9&\n\x08\x42igtable\x12\xdb\x03\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"\x81\x03\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x9a\x01\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*ZZ\"U/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\xee\x03\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"\x85\x03\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x9e\x01\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeysZ\\\x12Z/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\x82\x04\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"\xa7\x03\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x82\xd3\xe4\x93\x02\x9c\x01\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*Z[\"V/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedView
s/*}\x12\xf5\x03\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"\x95\x03\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id\x82\xd3\xe4\x93\x02\x9e\x01\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*Z\\\"W/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\xf6\x04\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"\x83\x04\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x82\xd3\xe4\x93\x02\xac\x01\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*Zc\"^/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}\x12\xee\x01\n\x0bPingAndWarm\x12&.google.bigtable.v2.PingAndWarmRequest\x1a\'.google.bigtable.v2.PingAndWarmResponse\"\x8d\x01\xda\x41\x04name\xda\x41\x13name,app_profile_id\x82\xd3\xe4\x93\x02+\"&/v2/{name=projects/*/instances/*}:ping:\x01*\x8a\xd3\xe4\x93\x02\x39\x12%\n\x04name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\x12\xa7\x04\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"\xb1\x03\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x82\xd3\xe4\x93\x02\xae\x01\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*Zd\"_/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}\x12\xbb\x02\n%GenerateInitialChangeStreamPartitions\x12@.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest\x1a\x41.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse\"\x8a\x01\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\x01*0\x01\x12\xe6\x01\n\x10ReadChangeStream\x12+.google.bigtable.v2.ReadChangeStreamRequest\x1a,.google.bigtable.v2.ReadChangeStreamResponse\"u\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x46\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\x01*0\x01\x12\xa9\x02\n\x0cPrepareQuery\x12\'.google.bigtable.v2.PrepareQueryRequest\x1a(.google.bigtable.v2.PrepareQueryResponse\"\xc5\x01\xda\x41\x13instance_name,query\xda\x41\"instance_name,query,app_profile_id\x82\xd3\xe4\x93\x02<\"7/v2/{instance_name=projects/*/instances/*}:prepareQuery:\x01*\x8a\xd3\xe4\x93\x02\x42\x12.\n\rinstance_name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\x12\xab\x02\n\x0c\x45xecuteQuery\x12\'.google.big
table.v2.ExecuteQueryRequest\x1a(.google.bigtable.v2.ExecuteQueryResponse\"\xc5\x01\xda\x41\x13instance_name,query\xda\x41\"instance_name,query,app_profile_id\x82\xd3\xe4\x93\x02<\"7/v2/{instance_name=projects/*/instances/*}:executeQuery:\x01*\x8a\xd3\xe4\x93\x02\x42\x12.\n\rinstance_name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id0\x01\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xf5\x04\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41P\n%bigtableadmin.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}\xea\x41\\\n\"bigtableadmin.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}\xea\x41\x87\x01\n+bigtableadmin.googleapis.com/AuthorizedView\x12Xprojects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}\xea\x41~\n-bigtableadmin.googleapis.com/MaterializedView\x12Mprojects/{project}/instances/{instance}/materializedViews/{materialized_view}b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.bigtable_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AP\n%bigtableadmin.googleapis.com/Instance\022\'projects/{project}/instances/{instance}\352A\\\n\"bigtableadmin.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}\352A\207\001\n+bigtableadmin.googleapis.com/AuthorizedView\022Xprojects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}\352A~\n-bigtableadmin.googleapis.com/MaterializedView\022Mprojects/{project}/instances/{instance}/materializedViews/{materialized_view}' + _globals['_READROWSREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_READROWSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_READROWSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_READROWSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_READROWSREQUEST'].fields_by_name['materialized_view_name']._loaded_options = None + _globals['_READROWSREQUEST'].fields_by_name['materialized_view_name']._serialized_options = b'\340A\001\372A/\n-bigtableadmin.googleapis.com/MaterializedView' + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + 
_globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['materialized_view_name']._loaded_options = None + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['materialized_view_name']._serialized_options = b'\340A\001\372A/\n-bigtableadmin.googleapis.com/MaterializedView' + _globals['_MUTATEROWREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_MUTATEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_MUTATEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_MUTATEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_MUTATEROWREQUEST'].fields_by_name['row_key']._loaded_options = None + _globals['_MUTATEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002' + _globals['_MUTATEROWREQUEST'].fields_by_name['mutations']._loaded_options = None + _globals['_MUTATEROWREQUEST'].fields_by_name['mutations']._serialized_options = b'\340A\002' + _globals['_MUTATEROWSREQUEST_ENTRY'].fields_by_name['mutations']._loaded_options = None + _globals['_MUTATEROWSREQUEST_ENTRY'].fields_by_name['mutations']._serialized_options = b'\340A\002' + _globals['_MUTATEROWSREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_MUTATEROWSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_MUTATEROWSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_MUTATEROWSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_MUTATEROWSREQUEST'].fields_by_name['entries']._loaded_options = None + _globals['_MUTATEROWSREQUEST'].fields_by_name['entries']._serialized_options = b'\340A\002' + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['row_key']._loaded_options = None + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002' + _globals['_PINGANDWARMREQUEST'].fields_by_name['name']._loaded_options = None + _globals['_PINGANDWARMREQUEST'].fields_by_name['name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + 
_globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['row_key']._loaded_options = None + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002' + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['rules']._loaded_options = None + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['rules']._serialized_options = b'\340A\002' + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_READCHANGESTREAMREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_READCHANGESTREAMREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_options = b'8\001' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['instance_name']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['instance_name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['app_profile_id']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['app_profile_id']._serialized_options = b'\340A\001' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['query']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['query']._serialized_options = b'\030\001\340A\002' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['proto_format']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['proto_format']._serialized_options = b'\030\001' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['resume_token']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['resume_token']._serialized_options = b'\340A\001' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['params']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['params']._serialized_options = b'\340A\002' + _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._loaded_options = None + _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_options = b'8\001' + _globals['_PREPAREQUERYREQUEST'].fields_by_name['instance_name']._loaded_options = None + _globals['_PREPAREQUERYREQUEST'].fields_by_name['instance_name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' + _globals['_PREPAREQUERYREQUEST'].fields_by_name['app_profile_id']._loaded_options = None + _globals['_PREPAREQUERYREQUEST'].fields_by_name['app_profile_id']._serialized_options = b'\340A\001' + _globals['_PREPAREQUERYREQUEST'].fields_by_name['query']._loaded_options = None + _globals['_PREPAREQUERYREQUEST'].fields_by_name['query']._serialized_options = b'\340A\002' + _globals['_PREPAREQUERYREQUEST'].fields_by_name['param_types']._loaded_options = None + _globals['_PREPAREQUERYREQUEST'].fields_by_name['param_types']._serialized_options = b'\340A\002' + _globals['_BIGTABLE']._loaded_options = None + _globals['_BIGTABLE']._serialized_options = 
b'\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only' + _globals['_BIGTABLE'].methods_by_name['ReadRows']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['ReadRows']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002\232\001\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*ZZ\"U/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['SampleRowKeys']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['SampleRowKeys']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002\236\001\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeysZ\\\022Z/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['MutateRow']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['MutateRow']._serialized_options = b'\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id\202\323\344\223\002\234\001\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*Z[\"V/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['MutateRows']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['MutateRows']._serialized_options = b'\332A\022table_name,entries\332A!table_name,entries,app_profile_id\202\323\344\223\002\236\001\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*Z\\\"W/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['CheckAndMutateRow']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['CheckAndMutateRow']._serialized_options = 
b'\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\202\323\344\223\002\254\001\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*Zc\"^/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['PingAndWarm']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['PingAndWarm']._serialized_options = b'\332A\004name\332A\023name,app_profile_id\202\323\344\223\002+\"&/v2/{name=projects/*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id' + _globals['_BIGTABLE'].methods_by_name['ReadModifyWriteRow']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['ReadModifyWriteRow']._serialized_options = b'\332A\030table_name,row_key,rules\332A\'table_name,row_key,rules,app_profile_id\202\323\344\223\002\256\001\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*Zd\"_/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['GenerateInitialChangeStreamPartitions']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['GenerateInitialChangeStreamPartitions']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\001*' + _globals['_BIGTABLE'].methods_by_name['ReadChangeStream']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['ReadChangeStream']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002F\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\001*' + _globals['_BIGTABLE'].methods_by_name['PrepareQuery']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['PrepareQuery']._serialized_options = b'\332A\023instance_name,query\332A\"instance_name,query,app_profile_id\202\323\344\223\002<\"7/v2/{instance_name=projects/*/instances/*}:prepareQuery:\001*\212\323\344\223\002B\022.\n\rinstance_name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id' + _globals['_BIGTABLE'].methods_by_name['ExecuteQuery']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['ExecuteQuery']._serialized_options = b'\332A\023instance_name,query\332A\"instance_name,query,app_profile_id\202\323\344\223\002<\"7/v2/{instance_name=projects/*/instances/*}:executeQuery:\001*\212\323\344\223\002B\022.\n\rinstance_name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id' + _globals['_READROWSREQUEST']._serialized_start=424 + _globals['_READROWSREQUEST']._serialized_end=1012 + _globals['_READROWSREQUEST_REQUESTSTATSVIEW']._serialized_start=910 + _globals['_READROWSREQUEST_REQUESTSTATSVIEW']._serialized_end=1012 + _globals['_READROWSRESPONSE']._serialized_start=1015 + 
_globals['_READROWSRESPONSE']._serialized_end=1448 + _globals['_READROWSRESPONSE_CELLCHUNK']._serialized_start=1187 + _globals['_READROWSRESPONSE_CELLCHUNK']._serialized_end=1448 + _globals['_SAMPLEROWKEYSREQUEST']._serialized_start=1451 + _globals['_SAMPLEROWKEYSREQUEST']._serialized_end=1731 + _globals['_SAMPLEROWKEYSRESPONSE']._serialized_start=1733 + _globals['_SAMPLEROWKEYSRESPONSE']._serialized_end=1795 + _globals['_MUTATEROWREQUEST']._serialized_start=1798 + _globals['_MUTATEROWREQUEST']._serialized_end=2063 + _globals['_MUTATEROWRESPONSE']._serialized_start=2065 + _globals['_MUTATEROWRESPONSE']._serialized_end=2084 + _globals['_MUTATEROWSREQUEST']._serialized_start=2087 + _globals['_MUTATEROWSREQUEST']._serialized_end=2424 + _globals['_MUTATEROWSREQUEST_ENTRY']._serialized_start=2346 + _globals['_MUTATEROWSREQUEST_ENTRY']._serialized_end=2424 + _globals['_MUTATEROWSRESPONSE']._serialized_start=2427 + _globals['_MUTATEROWSRESPONSE']._serialized_end=2655 + _globals['_MUTATEROWSRESPONSE_ENTRY']._serialized_start=2577 + _globals['_MUTATEROWSRESPONSE_ENTRY']._serialized_end=2635 + _globals['_RATELIMITINFO']._serialized_start=2657 + _globals['_RATELIMITINFO']._serialized_end=2731 + _globals['_CHECKANDMUTATEROWREQUEST']._serialized_start=2734 + _globals['_CHECKANDMUTATEROWREQUEST']._serialized_end=3119 + _globals['_CHECKANDMUTATEROWRESPONSE']._serialized_start=3121 + _globals['_CHECKANDMUTATEROWRESPONSE']._serialized_end=3175 + _globals['_PINGANDWARMREQUEST']._serialized_start=3177 + _globals['_PINGANDWARMREQUEST']._serialized_end=3282 + _globals['_PINGANDWARMRESPONSE']._serialized_start=3284 + _globals['_PINGANDWARMRESPONSE']._serialized_end=3305 + _globals['_READMODIFYWRITEROWREQUEST']._serialized_start=3308 + _globals['_READMODIFYWRITEROWREQUEST']._serialized_end=3589 + _globals['_READMODIFYWRITEROWRESPONSE']._serialized_start=3591 + _globals['_READMODIFYWRITEROWRESPONSE']._serialized_end=3657 + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST']._serialized_start=3660 + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST']._serialized_end=3794 + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE']._serialized_start=3796 + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE']._serialized_end=3899 + _globals['_READCHANGESTREAMREQUEST']._serialized_start=3902 + _globals['_READCHANGESTREAMREQUEST']._serialized_end=4313 + _globals['_READCHANGESTREAMRESPONSE']._serialized_start=4316 + _globals['_READCHANGESTREAMRESPONSE']._serialized_end=5637 + _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK']._serialized_start=4584 + _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK']._serialized_end=4828 + _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO']._serialized_start=4739 + _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO']._serialized_end=4828 + _globals['_READCHANGESTREAMRESPONSE_DATACHANGE']._serialized_start=4831 + _globals['_READCHANGESTREAMRESPONSE_DATACHANGE']._serialized_end=5285 + _globals['_READCHANGESTREAMRESPONSE_DATACHANGE_TYPE']._serialized_start=5205 + _globals['_READCHANGESTREAMRESPONSE_DATACHANGE_TYPE']._serialized_end=5285 + _globals['_READCHANGESTREAMRESPONSE_HEARTBEAT']._serialized_start=5288 + _globals['_READCHANGESTREAMRESPONSE_HEARTBEAT']._serialized_end=5433 + _globals['_READCHANGESTREAMRESPONSE_CLOSESTREAM']._serialized_start=5436 + _globals['_READCHANGESTREAMRESPONSE_CLOSESTREAM']._serialized_end=5620 + _globals['_EXECUTEQUERYREQUEST']._serialized_start=5640 + _globals['_EXECUTEQUERYREQUEST']._serialized_end=6057 + 
_globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_start=5970 + _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_end=6042 + _globals['_EXECUTEQUERYRESPONSE']._serialized_start=6060 + _globals['_EXECUTEQUERYRESPONSE']._serialized_end=6210 + _globals['_PREPAREQUERYREQUEST']._serialized_start=6213 + _globals['_PREPAREQUERYREQUEST']._serialized_end=6585 + _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_start=6495 + _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_end=6570 + _globals['_PREPAREQUERYRESPONSE']._serialized_start=6588 + _globals['_PREPAREQUERYRESPONSE']._serialized_end=6740 + _globals['_BIGTABLE']._serialized_start=6743 + _globals['_BIGTABLE']._serialized_end=11680 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/bigtable_pb2_grpc.py b/test_proxy/protos/bigtable_pb2_grpc.py new file mode 100644 index 000000000..ef4e5bed6 --- /dev/null +++ b/test_proxy/protos/bigtable_pb2_grpc.py @@ -0,0 +1,562 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import bigtable_pb2 as google_dot_bigtable_dot_v2_dot_bigtable__pb2 + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in google/bigtable/v2/bigtable_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ReadRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString, + _registered_method=True) + self.SampleRowKeys = channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + _registered_method=True) + self.MutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString, + _registered_method=True) + self.MutateRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString, + _registered_method=True) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + _registered_method=True) + self.PingAndWarm = channel.unary_unary( + '/google.bigtable.v2.Bigtable/PingAndWarm', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString, + _registered_method=True) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + _registered_method=True) + self.GenerateInitialChangeStreamPartitions = channel.unary_stream( + '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString, + _registered_method=True) + self.ReadChangeStream = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadChangeStream', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString, + _registered_method=True) + self.PrepareQuery = channel.unary_unary( + '/google.bigtable.v2.Bigtable/PrepareQuery', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.FromString, + _registered_method=True) + self.ExecuteQuery = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ExecuteQuery', + 
request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.FromString, + _registered_method=True) + + +class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PingAndWarm(self, request, context): + """Warm up associated instance metadata for this connection. + This call is not required but may be useful for connection keep-alive. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GenerateInitialChangeStreamPartitions(self, request, context): + """NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire keyspace. + Partitions can be read with `ReadChangeStream`. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadChangeStream(self, request, context): + """NOTE: This API is intended to be used by Apache Beam BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that are caused by + garbage collection. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PrepareQuery(self, request, context): + """Prepares a GoogleSQL query for execution on a particular Bigtable instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ExecuteQuery(self, request, context): + """Executes a SQL query against a particular Bigtable instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ReadRows': grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.SerializeToString, + ), + 'MutateRows': grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, + ), + 'PingAndWarm': grpc.unary_unary_rpc_method_handler( + servicer.PingAndWarm, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, + ), + 'GenerateInitialChangeStreamPartitions': grpc.unary_stream_rpc_method_handler( + servicer.GenerateInitialChangeStreamPartitions, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.FromString, + 
response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.SerializeToString, + ), + 'ReadChangeStream': grpc.unary_stream_rpc_method_handler( + servicer.ReadChangeStream, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.SerializeToString, + ), + 'PrepareQuery': grpc.unary_unary_rpc_method_handler( + servicer.PrepareQuery, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.SerializeToString, + ), + 'ExecuteQuery': grpc.unary_stream_rpc_method_handler( + servicer.ExecuteQuery, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.v2.Bigtable', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('google.bigtable.v2.Bigtable', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class Bigtable(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + @staticmethod + def ReadRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/ReadRows', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SampleRowKeys(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/SampleRowKeys', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def MutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/MutateRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def MutateRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + 
metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/MutateRows', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CheckAndMutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def PingAndWarm(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/PingAndWarm', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ReadModifyWriteRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GenerateInitialChangeStreamPartitions(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ReadChangeStream(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/ReadChangeStream', + 
google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def PrepareQuery(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/PrepareQuery', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ExecuteQuery(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/ExecuteQuery', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/test_proxy/protos/data_pb2.py b/test_proxy/protos/data_pb2.py new file mode 100644 index 000000000..8b6e68df1 --- /dev/null +++ b/test_proxy/protos/data_pb2.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: google/bigtable/v2/data.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'google/bigtable/v2/data.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +import types_pb2 as google_dot_bigtable_dot_v2_dot_types__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.type import date_pb2 as google_dot_type_dot_date__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1egoogle/bigtable/v2/types.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x16google/type/date.proto\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\xf4\x02\n\x05Value\x12&\n\x04type\x18\x07 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12\x13\n\traw_value\x18\x08 \x01(\x0cH\x00\x12\x1e\n\x14raw_timestamp_micros\x18\t \x01(\x03H\x00\x12\x15\n\x0b\x62ytes_value\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x12\x13\n\tint_value\x18\x06 \x01(\x03H\x00\x12\x14\n\nbool_value\x18\n \x01(\x08H\x00\x12\x15\n\x0b\x66loat_value\x18\x0b \x01(\x01H\x00\x12\x35\n\x0ftimestamp_value\x18\x0c \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\'\n\ndate_value\x18\r \x01(\x0b\x32\x11.google.type.DateH\x00\x12\x35\n\x0b\x61rray_value\x18\x04 \x01(\x0b\x32\x1e.google.bigtable.v2.ArrayValueH\x00\x42\x06\n\x04kind\"7\n\nArrayValue\x12)\n\x06values\x18\x01 \x03(\x0b\x32\x19.google.bigtable.v2.Value\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 
\x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xad\x08\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12=\n\x0b\x61\x64\x64_to_cell\x18\x05 \x01(\x0b\x32&.google.bigtable.v2.Mutation.AddToCellH\x00\x12\x41\n\rmerge_to_cell\x18\x06 \x01(\x0b\x32(.google.bigtable.v2.Mutation.MergeToCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1a\xad\x01\n\tAddToCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x33\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12,\n\ttimestamp\x18\x03 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12(\n\x05input\x18\x04 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x1a\xaf\x01\n\x0bMergeToCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x33\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12,\n\ttimestamp\x18\x03 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12(\n\x05input\x18\x04 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 
\x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04rule\"B\n\x0fStreamPartition\x12/\n\trow_range\x18\x01 \x01(\x0b\x32\x1c.google.bigtable.v2.RowRange\"W\n\x18StreamContinuationTokens\x12;\n\x06tokens\x18\x01 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\"`\n\x17StreamContinuationToken\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\r\n\x05token\x18\x02 \x01(\t\"\r\n\x0bProtoFormat\"F\n\x0e\x43olumnMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x04type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type\"B\n\x0bProtoSchema\x12\x33\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\".google.bigtable.v2.ColumnMetadata\"V\n\x11ResultSetMetadata\x12\x37\n\x0cproto_schema\x18\x01 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoSchemaH\x00\x42\x08\n\x06schema\"6\n\tProtoRows\x12)\n\x06values\x18\x02 \x03(\x0b\x32\x19.google.bigtable.v2.Value\"$\n\x0eProtoRowsBatch\x12\x12\n\nbatch_data\x18\x01 \x01(\x0c\"\xd5\x01\n\x10PartialResultSet\x12>\n\x10proto_rows_batch\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.ProtoRowsBatchH\x00\x12\x1b\n\x0e\x62\x61tch_checksum\x18\x06 \x01(\rH\x01\x88\x01\x01\x12\x14\n\x0cresume_token\x18\x05 \x01(\x0c\x12\r\n\x05reset\x18\x07 \x01(\x08\x12\x1c\n\x14\x65stimated_batch_size\x18\x04 \x01(\x05\x42\x0e\n\x0cpartial_rowsB\x11\n\x0f_batch_checksumB\xb3\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.data_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\tDataProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _globals['_ROW']._serialized_start=175 + _globals['_ROW']._serialized_end=239 + _globals['_FAMILY']._serialized_start=241 + _globals['_FAMILY']._serialized_end=308 + _globals['_COLUMN']._serialized_start=310 + _globals['_COLUMN']._serialized_end=378 + _globals['_CELL']._serialized_start=380 + _globals['_CELL']._serialized_end=443 + _globals['_VALUE']._serialized_start=446 + _globals['_VALUE']._serialized_end=818 + _globals['_ARRAYVALUE']._serialized_start=820 + _globals['_ARRAYVALUE']._serialized_end=875 + _globals['_ROWRANGE']._serialized_start=878 + _globals['_ROWRANGE']._serialized_end=1016 + _globals['_ROWSET']._serialized_start=1018 + _globals['_ROWSET']._serialized_end=1094 + _globals['_COLUMNRANGE']._serialized_start=1097 + _globals['_COLUMNRANGE']._serialized_end=1295 + _globals['_TIMESTAMPRANGE']._serialized_start=1297 + _globals['_TIMESTAMPRANGE']._serialized_end=1375 + _globals['_VALUERANGE']._serialized_start=1378 + _globals['_VALUERANGE']._serialized_end=1530 + _globals['_ROWFILTER']._serialized_start=1533 + _globals['_ROWFILTER']._serialized_end=2652 + 
_globals['_ROWFILTER_CHAIN']._serialized_start=2349 + _globals['_ROWFILTER_CHAIN']._serialized_end=2404 + _globals['_ROWFILTER_INTERLEAVE']._serialized_start=2406 + _globals['_ROWFILTER_INTERLEAVE']._serialized_end=2466 + _globals['_ROWFILTER_CONDITION']._serialized_start=2469 + _globals['_ROWFILTER_CONDITION']._serialized_end=2642 + _globals['_MUTATION']._serialized_start=2655 + _globals['_MUTATION']._serialized_end=3724 + _globals['_MUTATION_SETCELL']._serialized_start=3080 + _globals['_MUTATION_SETCELL']._serialized_end=3177 + _globals['_MUTATION_ADDTOCELL']._serialized_start=3180 + _globals['_MUTATION_ADDTOCELL']._serialized_end=3353 + _globals['_MUTATION_MERGETOCELL']._serialized_start=3356 + _globals['_MUTATION_MERGETOCELL']._serialized_end=3531 + _globals['_MUTATION_DELETEFROMCOLUMN']._serialized_start=3533 + _globals['_MUTATION_DELETEFROMCOLUMN']._serialized_end=3654 + _globals['_MUTATION_DELETEFROMFAMILY']._serialized_start=3656 + _globals['_MUTATION_DELETEFROMFAMILY']._serialized_end=3695 + _globals['_MUTATION_DELETEFROMROW']._serialized_start=3697 + _globals['_MUTATION_DELETEFROMROW']._serialized_end=3712 + _globals['_READMODIFYWRITERULE']._serialized_start=3727 + _globals['_READMODIFYWRITERULE']._serialized_end=3855 + _globals['_STREAMPARTITION']._serialized_start=3857 + _globals['_STREAMPARTITION']._serialized_end=3923 + _globals['_STREAMCONTINUATIONTOKENS']._serialized_start=3925 + _globals['_STREAMCONTINUATIONTOKENS']._serialized_end=4012 + _globals['_STREAMCONTINUATIONTOKEN']._serialized_start=4014 + _globals['_STREAMCONTINUATIONTOKEN']._serialized_end=4110 + _globals['_PROTOFORMAT']._serialized_start=4112 + _globals['_PROTOFORMAT']._serialized_end=4125 + _globals['_COLUMNMETADATA']._serialized_start=4127 + _globals['_COLUMNMETADATA']._serialized_end=4197 + _globals['_PROTOSCHEMA']._serialized_start=4199 + _globals['_PROTOSCHEMA']._serialized_end=4265 + _globals['_RESULTSETMETADATA']._serialized_start=4267 + _globals['_RESULTSETMETADATA']._serialized_end=4353 + _globals['_PROTOROWS']._serialized_start=4355 + _globals['_PROTOROWS']._serialized_end=4409 + _globals['_PROTOROWSBATCH']._serialized_start=4411 + _globals['_PROTOROWSBATCH']._serialized_end=4447 + _globals['_PARTIALRESULTSET']._serialized_start=4450 + _globals['_PARTIALRESULTSET']._serialized_end=4663 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/data_pb2_grpc.py b/test_proxy/protos/data_pb2_grpc.py new file mode 100644 index 000000000..f7a5195e8 --- /dev/null +++ b/test_proxy/protos/data_pb2_grpc.py @@ -0,0 +1,24 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in google/bigtable/v2/data_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
+ ) diff --git a/test_proxy/protos/request_stats_pb2.py b/test_proxy/protos/request_stats_pb2.py new file mode 100644 index 000000000..95fcc6e0f --- /dev/null +++ b/test_proxy/protos/request_stats_pb2.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/v2/request_stats.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&google/bigtable/v2/request_stats.proto\x12\x12google.bigtable.v2\x1a\x1egoogle/protobuf/duration.proto\"\x82\x01\n\x12ReadIterationStats\x12\x17\n\x0frows_seen_count\x18\x01 \x01(\x03\x12\x1b\n\x13rows_returned_count\x18\x02 \x01(\x03\x12\x18\n\x10\x63\x65lls_seen_count\x18\x03 \x01(\x03\x12\x1c\n\x14\x63\x65lls_returned_count\x18\x04 \x01(\x03\"Q\n\x13RequestLatencyStats\x12:\n\x17\x66rontend_server_latency\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xa1\x01\n\x11\x46ullReadStatsView\x12\x44\n\x14read_iteration_stats\x18\x01 \x01(\x0b\x32&.google.bigtable.v2.ReadIterationStats\x12\x46\n\x15request_latency_stats\x18\x02 \x01(\x0b\x32\'.google.bigtable.v2.RequestLatencyStats\"c\n\x0cRequestStats\x12\x45\n\x14\x66ull_read_stats_view\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.FullReadStatsViewH\x00\x42\x0c\n\nstats_viewB\xbd\x01\n\x16\x63om.google.bigtable.v2B\x11RequestStatsProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.request_stats_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\021RequestStatsProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _READITERATIONSTATS._serialized_start=95 + _READITERATIONSTATS._serialized_end=225 + _REQUESTLATENCYSTATS._serialized_start=227 + _REQUESTLATENCYSTATS._serialized_end=308 + _FULLREADSTATSVIEW._serialized_start=311 + _FULLREADSTATSVIEW._serialized_end=472 + _REQUESTSTATS._serialized_start=474 + _REQUESTSTATS._serialized_end=573 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/request_stats_pb2_grpc.py b/test_proxy/protos/request_stats_pb2_grpc.py new file mode 100644 index 000000000..2daafffeb --- /dev/null +++ b/test_proxy/protos/request_stats_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/test_proxy/protos/test_proxy_pb2.py b/test_proxy/protos/test_proxy_pb2.py new file mode 100644 index 000000000..1f85b086b --- /dev/null +++ b/test_proxy/protos/test_proxy_pb2.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: test_proxy.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'test_proxy.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +import bigtable_pb2 as google_dot_bigtable_dot_v2_dot_bigtable__pb2 +import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10test_proxy.proto\x12\x19google.bigtable.testproxy\x1a\x17google/api/client.proto\x1a!google/bigtable/v2/bigtable.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\"\xda\x03\n\x13\x43reateClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61ta_target\x18\x02 \x01(\t\x12\x12\n\nproject_id\x18\x03 \x01(\t\x12\x13\n\x0binstance_id\x18\x04 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12\x38\n\x15per_operation_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12Q\n\x17optional_feature_config\x18\x07 \x01(\x0e\x32\x30.google.bigtable.testproxy.OptionalFeatureConfig\x12X\n\x10security_options\x18\x08 \x01(\x0b\x32>.google.bigtable.testproxy.CreateClientRequest.SecurityOptions\x1as\n\x0fSecurityOptions\x12\x14\n\x0c\x61\x63\x63\x65ss_token\x18\x01 \x01(\t\x12\x0f\n\x07use_ssl\x18\x02 \x01(\x08\x12\x1d\n\x15ssl_endpoint_override\x18\x03 \x01(\t\x12\x1a\n\x12ssl_root_certs_pem\x18\x04 \x01(\t\"\x16\n\x14\x43reateClientResponse\"\'\n\x12\x43loseClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x15\n\x13\x43loseClientResponse\"(\n\x13RemoveClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x16\n\x14RemoveClientResponse\"w\n\x0eReadRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\t\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\"U\n\tRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"u\n\x0fReadRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x34\n\x07request\x18\x02 \x01(\x0b\x32#.google.bigtable.v2.ReadRowsRequest\x12\x19\n\x11\x63\x61ncel_after_rows\x18\x03 \x01(\x05\"W\n\nRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12%\n\x04rows\x18\x02 \x03(\x0b\x32\x17.google.bigtable.v2.Row\"\\\n\x10MutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x35\n\x07request\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.MutateRowRequest\"5\n\x0fMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\"^\n\x11MutateRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x36\n\x07request\x18\x02 \x01(\x0b\x32%.google.bigtable.v2.MutateRowsRequest\"u\n\x10MutateRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x07\x65ntries\x18\x02 
\x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\"l\n\x18\x43heckAndMutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12=\n\x07request\x18\x02 \x01(\x0b\x32,.google.bigtable.v2.CheckAndMutateRowRequest\"|\n\x17\x43heckAndMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x06result\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.CheckAndMutateRowResponse\"d\n\x14SampleRowKeysRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x39\n\x07request\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.SampleRowKeysRequest\"u\n\x13SampleRowKeysResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12:\n\x07samples\x18\x02 \x03(\x0b\x32).google.bigtable.v2.SampleRowKeysResponse\"n\n\x19ReadModifyWriteRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12>\n\x07request\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.ReadModifyWriteRowRequest\"b\n\x13\x45xecuteQueryRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x38\n\x07request\x18\x02 \x01(\x0b\x32\'.google.bigtable.v2.ExecuteQueryRequest\"\xa9\x01\n\x12\x45xecuteQueryResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12>\n\x08metadata\x18\x04 \x01(\x0b\x32,.google.bigtable.testproxy.ResultSetMetadata\x12/\n\x04rows\x18\x03 \x03(\x0b\x32!.google.bigtable.testproxy.SqlRow\"H\n\x11ResultSetMetadata\x12\x33\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\".google.bigtable.v2.ColumnMetadata\"3\n\x06SqlRow\x12)\n\x06values\x18\x01 \x03(\x0b\x32\x19.google.bigtable.v2.Value*d\n\x15OptionalFeatureConfig\x12#\n\x1fOPTIONAL_FEATURE_CONFIG_DEFAULT\x10\x00\x12&\n\"OPTIONAL_FEATURE_CONFIG_ENABLE_ALL\x10\x01\x32\x95\n\n\x18\x43loudBigtableV2TestProxy\x12q\n\x0c\x43reateClient\x12..google.bigtable.testproxy.CreateClientRequest\x1a/.google.bigtable.testproxy.CreateClientResponse\"\x00\x12n\n\x0b\x43loseClient\x12-.google.bigtable.testproxy.CloseClientRequest\x1a..google.bigtable.testproxy.CloseClientResponse\"\x00\x12q\n\x0cRemoveClient\x12..google.bigtable.testproxy.RemoveClientRequest\x1a/.google.bigtable.testproxy.RemoveClientResponse\"\x00\x12\\\n\x07ReadRow\x12).google.bigtable.testproxy.ReadRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12_\n\x08ReadRows\x12*.google.bigtable.testproxy.ReadRowsRequest\x1a%.google.bigtable.testproxy.RowsResult\"\x00\x12\x66\n\tMutateRow\x12+.google.bigtable.testproxy.MutateRowRequest\x1a*.google.bigtable.testproxy.MutateRowResult\"\x00\x12m\n\x0e\x42ulkMutateRows\x12,.google.bigtable.testproxy.MutateRowsRequest\x1a+.google.bigtable.testproxy.MutateRowsResult\"\x00\x12~\n\x11\x43heckAndMutateRow\x12\x33.google.bigtable.testproxy.CheckAndMutateRowRequest\x1a\x32.google.bigtable.testproxy.CheckAndMutateRowResult\"\x00\x12r\n\rSampleRowKeys\x12/.google.bigtable.testproxy.SampleRowKeysRequest\x1a..google.bigtable.testproxy.SampleRowKeysResult\"\x00\x12r\n\x12ReadModifyWriteRow\x12\x34.google.bigtable.testproxy.ReadModifyWriteRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12o\n\x0c\x45xecuteQuery\x12..google.bigtable.testproxy.ExecuteQueryRequest\x1a-.google.bigtable.testproxy.ExecuteQueryResult\"\x00\x1a\x34\xca\x41\x31\x62igtable-test-proxy-not-accessible.googleapis.comBg\n#com.google.cloud.bigtable.testproxyP\x01Z>cloud.google.com/go/bigtable/testproxy/testproxypb;testproxypbb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'test_proxy_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + 
_globals['DESCRIPTOR']._serialized_options = b'\n#com.google.cloud.bigtable.testproxyP\001Z>cloud.google.com/go/bigtable/testproxy/testproxypb;testproxypb' + _globals['_CLOUDBIGTABLEV2TESTPROXY']._loaded_options = None + _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_options = b'\312A1bigtable-test-proxy-not-accessible.googleapis.com' + _globals['_OPTIONALFEATURECONFIG']._serialized_start=2574 + _globals['_OPTIONALFEATURECONFIG']._serialized_end=2674 + _globals['_CREATECLIENTREQUEST']._serialized_start=196 + _globals['_CREATECLIENTREQUEST']._serialized_end=670 + _globals['_CREATECLIENTREQUEST_SECURITYOPTIONS']._serialized_start=555 + _globals['_CREATECLIENTREQUEST_SECURITYOPTIONS']._serialized_end=670 + _globals['_CREATECLIENTRESPONSE']._serialized_start=672 + _globals['_CREATECLIENTRESPONSE']._serialized_end=694 + _globals['_CLOSECLIENTREQUEST']._serialized_start=696 + _globals['_CLOSECLIENTREQUEST']._serialized_end=735 + _globals['_CLOSECLIENTRESPONSE']._serialized_start=737 + _globals['_CLOSECLIENTRESPONSE']._serialized_end=758 + _globals['_REMOVECLIENTREQUEST']._serialized_start=760 + _globals['_REMOVECLIENTREQUEST']._serialized_end=800 + _globals['_REMOVECLIENTRESPONSE']._serialized_start=802 + _globals['_REMOVECLIENTRESPONSE']._serialized_end=824 + _globals['_READROWREQUEST']._serialized_start=826 + _globals['_READROWREQUEST']._serialized_end=945 + _globals['_ROWRESULT']._serialized_start=947 + _globals['_ROWRESULT']._serialized_end=1032 + _globals['_READROWSREQUEST']._serialized_start=1034 + _globals['_READROWSREQUEST']._serialized_end=1151 + _globals['_ROWSRESULT']._serialized_start=1153 + _globals['_ROWSRESULT']._serialized_end=1240 + _globals['_MUTATEROWREQUEST']._serialized_start=1242 + _globals['_MUTATEROWREQUEST']._serialized_end=1334 + _globals['_MUTATEROWRESULT']._serialized_start=1336 + _globals['_MUTATEROWRESULT']._serialized_end=1389 + _globals['_MUTATEROWSREQUEST']._serialized_start=1391 + _globals['_MUTATEROWSREQUEST']._serialized_end=1485 + _globals['_MUTATEROWSRESULT']._serialized_start=1487 + _globals['_MUTATEROWSRESULT']._serialized_end=1604 + _globals['_CHECKANDMUTATEROWREQUEST']._serialized_start=1606 + _globals['_CHECKANDMUTATEROWREQUEST']._serialized_end=1714 + _globals['_CHECKANDMUTATEROWRESULT']._serialized_start=1716 + _globals['_CHECKANDMUTATEROWRESULT']._serialized_end=1840 + _globals['_SAMPLEROWKEYSREQUEST']._serialized_start=1842 + _globals['_SAMPLEROWKEYSREQUEST']._serialized_end=1942 + _globals['_SAMPLEROWKEYSRESULT']._serialized_start=1944 + _globals['_SAMPLEROWKEYSRESULT']._serialized_end=2061 + _globals['_READMODIFYWRITEROWREQUEST']._serialized_start=2063 + _globals['_READMODIFYWRITEROWREQUEST']._serialized_end=2173 + _globals['_EXECUTEQUERYREQUEST']._serialized_start=2175 + _globals['_EXECUTEQUERYREQUEST']._serialized_end=2273 + _globals['_EXECUTEQUERYRESULT']._serialized_start=2276 + _globals['_EXECUTEQUERYRESULT']._serialized_end=2445 + _globals['_RESULTSETMETADATA']._serialized_start=2447 + _globals['_RESULTSETMETADATA']._serialized_end=2519 + _globals['_SQLROW']._serialized_start=2521 + _globals['_SQLROW']._serialized_end=2572 + _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_start=2677 + _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_end=3978 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/test_proxy_pb2_grpc.py b/test_proxy/protos/test_proxy_pb2_grpc.py new file mode 100644 index 000000000..b9d11034e --- /dev/null +++ b/test_proxy/protos/test_proxy_pb2_grpc.py @@ -0,0 +1,598 @@ +# Generated by the gRPC Python 
protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + +import test_proxy_pb2 as test__proxy__pb2 + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in test_proxy_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + + +class CloudBigtableV2TestProxyStub(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. + + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', + request_serializer=test__proxy__pb2.CreateClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CreateClientResponse.FromString, + _registered_method=True) + self.CloseClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', + request_serializer=test__proxy__pb2.CloseClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CloseClientResponse.FromString, + _registered_method=True) + self.RemoveClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', + request_serializer=test__proxy__pb2.RemoveClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RemoveClientResponse.FromString, + _registered_method=True) + self.ReadRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', + request_serializer=test__proxy__pb2.ReadRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowResult.FromString, + _registered_method=True) + self.ReadRows = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', + request_serializer=test__proxy__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowsResult.FromString, + _registered_method=True) + self.MutateRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', + request_serializer=test__proxy__pb2.MutateRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.MutateRowResult.FromString, + _registered_method=True) + self.BulkMutateRows = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows', + request_serializer=test__proxy__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=test__proxy__pb2.MutateRowsResult.FromString, + _registered_method=True) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow', + request_serializer=test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CheckAndMutateRowResult.FromString, + _registered_method=True) + self.SampleRowKeys = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys', + request_serializer=test__proxy__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=test__proxy__pb2.SampleRowKeysResult.FromString, + _registered_method=True) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow', + request_serializer=test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowResult.FromString, + _registered_method=True) + self.ExecuteQuery = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ExecuteQuery', + request_serializer=test__proxy__pb2.ExecuteQueryRequest.SerializeToString, + response_deserializer=test__proxy__pb2.ExecuteQueryResult.FromString, + _registered_method=True) + + +class CloudBigtableV2TestProxyServicer(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. 
For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. + + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + def CreateClient(self, request, context): + """Client management: + + Creates a client in the proxy. + Each client has its own dedicated channel(s), and can be used concurrently + and independently with other clients. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CloseClient(self, request, context): + """Closes a client in the proxy, making it not accept new requests. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RemoveClient(self, request, context): + """Removes a client in the proxy, making it inaccessible. Client closing + should be done by CloseClient() separately. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadRow(self, request, context): + """Bigtable operations: for each operation, you should use the synchronous or + asynchronous variant of the client method based on the `use_async_method` + setting of the client instance. For starters, you can choose to implement + one variant, and return UNIMPLEMENTED status for the other. + + Reads a row with the client instance. + The result row may not be present in the response. + Callers should check for it (e.g. calling has_row() in C++). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadRows(self, request, context): + """Reads rows with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Writes a row with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def BulkMutateRows(self, request, context): + """Writes multiple rows with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Performs a check-and-mutate-row operation with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Obtains a row key sampling with the client instance. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Performs a read-modify-write operation with the client. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ExecuteQuery(self, request, context): + """Executes a BTQL query with the client. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CloudBigtableV2TestProxyServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateClient': grpc.unary_unary_rpc_method_handler( + servicer.CreateClient, + request_deserializer=test__proxy__pb2.CreateClientRequest.FromString, + response_serializer=test__proxy__pb2.CreateClientResponse.SerializeToString, + ), + 'CloseClient': grpc.unary_unary_rpc_method_handler( + servicer.CloseClient, + request_deserializer=test__proxy__pb2.CloseClientRequest.FromString, + response_serializer=test__proxy__pb2.CloseClientResponse.SerializeToString, + ), + 'RemoveClient': grpc.unary_unary_rpc_method_handler( + servicer.RemoveClient, + request_deserializer=test__proxy__pb2.RemoveClientRequest.FromString, + response_serializer=test__proxy__pb2.RemoveClientResponse.SerializeToString, + ), + 'ReadRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadRow, + request_deserializer=test__proxy__pb2.ReadRowRequest.FromString, + response_serializer=test__proxy__pb2.RowResult.SerializeToString, + ), + 'ReadRows': grpc.unary_unary_rpc_method_handler( + servicer.ReadRows, + request_deserializer=test__proxy__pb2.ReadRowsRequest.FromString, + response_serializer=test__proxy__pb2.RowsResult.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=test__proxy__pb2.MutateRowRequest.FromString, + response_serializer=test__proxy__pb2.MutateRowResult.SerializeToString, + ), + 'BulkMutateRows': grpc.unary_unary_rpc_method_handler( + servicer.BulkMutateRows, + request_deserializer=test__proxy__pb2.MutateRowsRequest.FromString, + response_serializer=test__proxy__pb2.MutateRowsResult.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=test__proxy__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=test__proxy__pb2.CheckAndMutateRowResult.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_unary_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=test__proxy__pb2.SampleRowKeysRequest.FromString, + response_serializer=test__proxy__pb2.SampleRowKeysResult.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=test__proxy__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=test__proxy__pb2.RowResult.SerializeToString, + ), + 'ExecuteQuery': grpc.unary_unary_rpc_method_handler( + servicer.ExecuteQuery, + request_deserializer=test__proxy__pb2.ExecuteQueryRequest.FromString, + response_serializer=test__proxy__pb2.ExecuteQueryResult.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.testproxy.CloudBigtableV2TestProxy', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + 
server.add_registered_method_handlers('google.bigtable.testproxy.CloudBigtableV2TestProxy', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class CloudBigtableV2TestProxy(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. + + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + @staticmethod + def CreateClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', + test__proxy__pb2.CreateClientRequest.SerializeToString, + test__proxy__pb2.CreateClientResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CloseClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', + test__proxy__pb2.CloseClientRequest.SerializeToString, + test__proxy__pb2.CloseClientResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def RemoveClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', + test__proxy__pb2.RemoveClientRequest.SerializeToString, + test__proxy__pb2.RemoveClientResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ReadRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', + test__proxy__pb2.ReadRowRequest.SerializeToString, + test__proxy__pb2.RowResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ReadRows(request, + target, + options=(), + 
channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', + test__proxy__pb2.ReadRowsRequest.SerializeToString, + test__proxy__pb2.RowsResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def MutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', + test__proxy__pb2.MutateRowRequest.SerializeToString, + test__proxy__pb2.MutateRowResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def BulkMutateRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows', + test__proxy__pb2.MutateRowsRequest.SerializeToString, + test__proxy__pb2.MutateRowsResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def CheckAndMutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow', + test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString, + test__proxy__pb2.CheckAndMutateRowResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SampleRowKeys(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys', + test__proxy__pb2.SampleRowKeysRequest.SerializeToString, + test__proxy__pb2.SampleRowKeysResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ReadModifyWriteRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow', + test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString, + test__proxy__pb2.RowResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + 
_registered_method=True) + + @staticmethod + def ExecuteQuery(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ExecuteQuery', + test__proxy__pb2.ExecuteQueryRequest.SerializeToString, + test__proxy__pb2.ExecuteQueryResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/test_proxy/protos/types_pb2.py b/test_proxy/protos/types_pb2.py new file mode 100644 index 000000000..7acdbf7f1 --- /dev/null +++ b/test_proxy/protos/types_pb2.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: google/bigtable/v2/types.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'google/bigtable/v2/types.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/bigtable/v2/types.proto\x12\x12google.bigtable.v2\x1a\x1fgoogle/api/field_behavior.proto\"\xe0\x10\n\x04Type\x12\x34\n\nbytes_type\x18\x01 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.BytesH\x00\x12\x36\n\x0bstring_type\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.v2.Type.StringH\x00\x12\x34\n\nint64_type\x18\x05 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.Int64H\x00\x12\x38\n\x0c\x66loat32_type\x18\x0c \x01(\x0b\x32 .google.bigtable.v2.Type.Float32H\x00\x12\x38\n\x0c\x66loat64_type\x18\t \x01(\x0b\x32 .google.bigtable.v2.Type.Float64H\x00\x12\x32\n\tbool_type\x18\x08 \x01(\x0b\x32\x1d.google.bigtable.v2.Type.BoolH\x00\x12<\n\x0etimestamp_type\x18\n \x01(\x0b\x32\".google.bigtable.v2.Type.TimestampH\x00\x12\x32\n\tdate_type\x18\x0b \x01(\x0b\x32\x1d.google.bigtable.v2.Type.DateH\x00\x12<\n\x0e\x61ggregate_type\x18\x06 \x01(\x0b\x32\".google.bigtable.v2.Type.AggregateH\x00\x12\x36\n\x0bstruct_type\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.Type.StructH\x00\x12\x34\n\narray_type\x18\x03 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.ArrayH\x00\x12\x30\n\x08map_type\x18\x04 \x01(\x0b\x32\x1c.google.bigtable.v2.Type.MapH\x00\x1a\x9d\x01\n\x05\x42ytes\x12\x39\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32\'.google.bigtable.v2.Type.Bytes.Encoding\x1aY\n\x08\x45ncoding\x12:\n\x03raw\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.Type.Bytes.Encoding.RawH\x00\x1a\x05\n\x03RawB\n\n\x08\x65ncoding\x1a\x8d\x02\n\x06String\x12:\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32(.google.bigtable.v2.Type.String.Encoding\x1a\xc6\x01\n\x08\x45ncoding\x12H\n\x08utf8_raw\x18\x01 \x01(\x0b\x32\x30.google.bigtable.v2.Type.String.Encoding.Utf8RawB\x02\x18\x01H\x00\x12H\n\nutf8_bytes\x18\x02 
\x01(\x0b\x32\x32.google.bigtable.v2.Type.String.Encoding.Utf8BytesH\x00\x1a\r\n\x07Utf8Raw:\x02\x18\x01\x1a\x0b\n\tUtf8BytesB\n\n\x08\x65ncoding\x1a\xf5\x01\n\x05Int64\x12\x39\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32\'.google.bigtable.v2.Type.Int64.Encoding\x1a\xb0\x01\n\x08\x45ncoding\x12R\n\x10\x62ig_endian_bytes\x18\x01 \x01(\x0b\x32\x36.google.bigtable.v2.Type.Int64.Encoding.BigEndianBytesH\x00\x1a\x44\n\x0e\x42igEndianBytes\x12\x32\n\nbytes_type\x18\x01 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.BytesB\n\n\x08\x65ncoding\x1a\x06\n\x04\x42ool\x1a\t\n\x07\x46loat32\x1a\t\n\x07\x46loat64\x1a\x0b\n\tTimestamp\x1a\x06\n\x04\x44\x61te\x1a\x84\x01\n\x06Struct\x12\x35\n\x06\x66ields\x18\x01 \x03(\x0b\x32%.google.bigtable.v2.Type.Struct.Field\x1a\x43\n\x05\x46ield\x12\x12\n\nfield_name\x18\x01 \x01(\t\x12&\n\x04type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a\x37\n\x05\x41rray\x12.\n\x0c\x65lement_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a_\n\x03Map\x12*\n\x08key_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12,\n\nvalue_type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a\xb7\x03\n\tAggregate\x12,\n\ninput_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12\x31\n\nstate_type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.TypeB\x03\xe0\x41\x03\x12\x35\n\x03sum\x18\x04 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.SumH\x00\x12_\n\x12hllpp_unique_count\x18\x05 \x01(\x0b\x32\x41.google.bigtable.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCountH\x00\x12\x35\n\x03max\x18\x06 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.MaxH\x00\x12\x35\n\x03min\x18\x07 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.MinH\x00\x1a\x05\n\x03Sum\x1a\x05\n\x03Max\x1a\x05\n\x03Min\x1a \n\x1eHyperLogLogPlusPlusUniqueCountB\x0c\n\naggregatorB\x06\n\x04kindB\xb4\x01\n\x16\x63om.google.bigtable.v2B\nTypesProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.types_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\nTypesProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _globals['_TYPE_STRING_ENCODING_UTF8RAW']._loaded_options = None + _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_options = b'\030\001' + _globals['_TYPE_STRING_ENCODING'].fields_by_name['utf8_raw']._loaded_options = None + _globals['_TYPE_STRING_ENCODING'].fields_by_name['utf8_raw']._serialized_options = b'\030\001' + _globals['_TYPE_AGGREGATE'].fields_by_name['state_type']._loaded_options = None + _globals['_TYPE_AGGREGATE'].fields_by_name['state_type']._serialized_options = b'\340A\003' + _globals['_TYPE']._serialized_start=88 + _globals['_TYPE']._serialized_end=2232 + _globals['_TYPE_BYTES']._serialized_start=765 + _globals['_TYPE_BYTES']._serialized_end=922 + _globals['_TYPE_BYTES_ENCODING']._serialized_start=833 + _globals['_TYPE_BYTES_ENCODING']._serialized_end=922 + _globals['_TYPE_BYTES_ENCODING_RAW']._serialized_start=905 + _globals['_TYPE_BYTES_ENCODING_RAW']._serialized_end=910 + _globals['_TYPE_STRING']._serialized_start=925 + 
_globals['_TYPE_STRING']._serialized_end=1194 + _globals['_TYPE_STRING_ENCODING']._serialized_start=996 + _globals['_TYPE_STRING_ENCODING']._serialized_end=1194 + _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_start=1156 + _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_end=1169 + _globals['_TYPE_STRING_ENCODING_UTF8BYTES']._serialized_start=1171 + _globals['_TYPE_STRING_ENCODING_UTF8BYTES']._serialized_end=1182 + _globals['_TYPE_INT64']._serialized_start=1197 + _globals['_TYPE_INT64']._serialized_end=1442 + _globals['_TYPE_INT64_ENCODING']._serialized_start=1266 + _globals['_TYPE_INT64_ENCODING']._serialized_end=1442 + _globals['_TYPE_INT64_ENCODING_BIGENDIANBYTES']._serialized_start=1362 + _globals['_TYPE_INT64_ENCODING_BIGENDIANBYTES']._serialized_end=1430 + _globals['_TYPE_BOOL']._serialized_start=1444 + _globals['_TYPE_BOOL']._serialized_end=1450 + _globals['_TYPE_FLOAT32']._serialized_start=1452 + _globals['_TYPE_FLOAT32']._serialized_end=1461 + _globals['_TYPE_FLOAT64']._serialized_start=1463 + _globals['_TYPE_FLOAT64']._serialized_end=1472 + _globals['_TYPE_TIMESTAMP']._serialized_start=1474 + _globals['_TYPE_TIMESTAMP']._serialized_end=1485 + _globals['_TYPE_DATE']._serialized_start=1487 + _globals['_TYPE_DATE']._serialized_end=1493 + _globals['_TYPE_STRUCT']._serialized_start=1496 + _globals['_TYPE_STRUCT']._serialized_end=1628 + _globals['_TYPE_STRUCT_FIELD']._serialized_start=1561 + _globals['_TYPE_STRUCT_FIELD']._serialized_end=1628 + _globals['_TYPE_ARRAY']._serialized_start=1630 + _globals['_TYPE_ARRAY']._serialized_end=1685 + _globals['_TYPE_MAP']._serialized_start=1687 + _globals['_TYPE_MAP']._serialized_end=1782 + _globals['_TYPE_AGGREGATE']._serialized_start=1785 + _globals['_TYPE_AGGREGATE']._serialized_end=2224 + _globals['_TYPE_AGGREGATE_SUM']._serialized_start=2157 + _globals['_TYPE_AGGREGATE_SUM']._serialized_end=2162 + _globals['_TYPE_AGGREGATE_MAX']._serialized_start=2164 + _globals['_TYPE_AGGREGATE_MAX']._serialized_end=2169 + _globals['_TYPE_AGGREGATE_MIN']._serialized_start=2171 + _globals['_TYPE_AGGREGATE_MIN']._serialized_end=2176 + _globals['_TYPE_AGGREGATE_HYPERLOGLOGPLUSPLUSUNIQUECOUNT']._serialized_start=2178 + _globals['_TYPE_AGGREGATE_HYPERLOGLOGPLUSPLUSUNIQUECOUNT']._serialized_end=2210 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/types_pb2_grpc.py b/test_proxy/protos/types_pb2_grpc.py new file mode 100644 index 000000000..29956dd38 --- /dev/null +++ b/test_proxy/protos/types_pb2_grpc.py @@ -0,0 +1,24 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in google/bigtable/v2/types_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' 
+    )
diff --git a/test_proxy/run_tests.sh b/test_proxy/run_tests.sh
new file mode 100755
index 000000000..b6f1291a6
--- /dev/null
+++ b/test_proxy/run_tests.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+
+# attempt to download golang if not found
+if [[ ! -x "$(command -v go)" ]]; then
+  echo "Downloading golang..."
+  wget https://go.dev/dl/go1.20.2.linux-amd64.tar.gz
+  tar -xzf go1.20.2.linux-amd64.tar.gz
+  export GOROOT=$(pwd)/go
+  export PATH=$GOROOT/bin:$PATH
+  export GOPATH=$HOME/go
+  go version
+fi
+
+# ensure the working dir is the script's folder
+SCRIPT_DIR=$(realpath $(dirname "$0"))
+cd $SCRIPT_DIR
+
+export PROXY_SERVER_PORT=$(shuf -i 50000-60000 -n 1)
+
+# download test suite
+if [ ! -d "cloud-bigtable-clients-test" ]; then
+  git clone https://github.com/googleapis/cloud-bigtable-clients-test.git
+fi
+
+# start proxy
+echo "starting with client type: $CLIENT_TYPE"
+python test_proxy.py --port $PROXY_SERVER_PORT --client_type $CLIENT_TYPE &
+PROXY_PID=$!
+function finish {
+  kill $PROXY_PID
+}
+trap finish EXIT
+
+if [[ $CLIENT_TYPE == "legacy" ]]; then
+  echo "Using legacy client"
+  # legacy client does not expose mutate_row. Disable those tests
+  TEST_ARGS="-skip TestMutateRow_"
+fi
+
+if [[ $CLIENT_TYPE != "async" ]]; then
+  echo "Using non-async client"
+  # sync and legacy clients do not support concurrent streams
+  TEST_ARGS="$TEST_ARGS -skip _Generic_MultiStream "
+fi
+
+# run tests
+pushd cloud-bigtable-clients-test/tests
+echo "Running with $TEST_ARGS"
+go test -v -proxy_addr=:$PROXY_SERVER_PORT $TEST_ARGS
diff --git a/test_proxy/test_proxy.py b/test_proxy/test_proxy.py
new file mode 100644
index 000000000..793500768
--- /dev/null
+++ b/test_proxy/test_proxy.py
@@ -0,0 +1,194 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+The Python implementation of the `cloud-bigtable-clients-test` proxy server.
+
+https://github.com/googleapis/cloud-bigtable-clients-test
+
+This server is intended to be used to test the correctness of Bigtable
+clients across languages.
+
+Contributor Note: the proxy implementation is split across TestProxyClientHandler
+and TestProxyGrpcServer. This is due to the fact that generated protos and proto-plus
+objects cannot be used in the same process, so we had to make use of the
+multiprocessing module to allow them to work together.
+""" + +import multiprocessing +import argparse +import sys +import os +sys.path.append("handlers") + + +def grpc_server_process(request_q, queue_pool, port=50055): + """ + Defines a process that hosts a grpc server + proxies requests to a client_handler_process + """ + sys.path.append("protos") + from concurrent import futures + + import grpc + import test_proxy_pb2_grpc + import grpc_handler + + # Start gRPC server + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + test_proxy_pb2_grpc.add_CloudBigtableV2TestProxyServicer_to_server( + grpc_handler.TestProxyGrpcServer(request_q, queue_pool), server + ) + server.add_insecure_port("[::]:" + port) + server.start() + print("grpc_server_process started, listening on " + port) + server.wait_for_termination() + + +async def client_handler_process_async(request_q, queue_pool, client_type="async"): + """ + Defines a process that recives Bigtable requests from a grpc_server_process, + and runs the request using a client library instance + """ + import base64 + import re + import asyncio + import warnings + import client_handler_data_async + warnings.filterwarnings("ignore", category=RuntimeWarning, message=".*Bigtable emulator.*") + + def camel_to_snake(str): + return re.sub(r"(?= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -google-api-core==1.34.0 -google-cloud-core==1.4.4 +google-api-core==2.17.0 +google-auth==2.23.0 +google-cloud-core==2.0.0 grpc-google-iam-v1==0.12.4 -proto-plus==1.22.0 +proto-plus==1.22.3 libcst==0.2.5 -protobuf==3.19.5 +protobuf==3.20.2 diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt index e69de29bb..a7e4616c9 100644 --- a/testing/constraints-3.8.txt +++ b/testing/constraints-3.8.txt @@ -0,0 +1,15 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List *all* library dependencies and extras in this file. +# Pin the version to the lower bound. +# +# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", +# Then this file should have foo==1.14.0 +google-api-core==2.17.0 +google-auth==2.23.0 +google-cloud-core==2.0.0 +grpc-google-iam-v1==0.12.4 +proto-plus==1.22.3 +libcst==0.2.5 +protobuf==3.20.2 +pytest-asyncio==0.21.2 diff --git a/tests/__init__.py b/tests/__init__.py index 89a37dc92..cbf94b283 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/system/__init__.py b/tests/system/__init__.py index 4de65971c..89a37dc92 100644 --- a/tests/system/__init__.py +++ b/tests/system/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/system/admin_overlay/__init__.py b/tests/system/admin_overlay/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/system/admin_overlay/conftest.py b/tests/system/admin_overlay/conftest.py new file mode 100644 index 000000000..66baef3f4 --- /dev/null +++ b/tests/system/admin_overlay/conftest.py @@ -0,0 +1,38 @@ +import google.auth + +import os +import pytest +import uuid + + +INSTANCE_PREFIX = "admin-overlay-instance" +BACKUP_PREFIX = "admin-overlay-backup" +ROW_PREFIX = "test-row" + +DEFAULT_CLUSTER_LOCATIONS = ["us-east1-b"] +REPLICATION_CLUSTER_LOCATIONS = ["us-east1-b", "us-west1-b"] +TEST_TABLE_NAME = "system-test-table" +TEST_BACKUP_TABLE_NAME = "system-test-backup-table" +TEST_COLUMMN_FAMILY_NAME = "test-column" +TEST_COLUMN_NAME = "value" +NUM_ROWS = 500 +INITIAL_CELL_VALUE = "Hello" +NEW_CELL_VALUE = "World" + + +@pytest.fixture(scope="session") +def admin_overlay_project_id(): + project_id = os.getenv("GOOGLE_CLOUD_PROJECT") + if not project_id: + _, project_id = google.auth.default() + return project_id + + +def generate_unique_suffix(name): + """ + Generates a unique suffix for the name. + + Uses UUID4 because using time.time doesn't guarantee + uniqueness when the time is frozen in containers. + """ + return f"{name}-{uuid.uuid4().hex[:7]}" diff --git a/tests/system/admin_overlay/test_system_async.py b/tests/system/admin_overlay/test_system_async.py new file mode 100644 index 000000000..aa412569e --- /dev/null +++ b/tests/system/admin_overlay/test_system_async.py @@ -0,0 +1,395 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Tuple + +from google.cloud import bigtable_admin_v2 as admin_v2 +from google.cloud.bigtable.data._cross_sync import CrossSync +from google.cloud.bigtable.data import mutations, read_rows_query +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from .conftest import ( + INSTANCE_PREFIX, + BACKUP_PREFIX, + ROW_PREFIX, + DEFAULT_CLUSTER_LOCATIONS, + REPLICATION_CLUSTER_LOCATIONS, + TEST_TABLE_NAME, + TEST_BACKUP_TABLE_NAME, + TEST_COLUMMN_FAMILY_NAME, + TEST_COLUMN_NAME, + NUM_ROWS, + INITIAL_CELL_VALUE, + NEW_CELL_VALUE, + generate_unique_suffix, +) + +from datetime import datetime, timedelta + +import pytest +import os + + +if CrossSync.is_async: + from google.api_core import operation_async as api_core_operation +else: + from google.api_core import operation as api_core_operation + + +__CROSS_SYNC_OUTPUT__ = "tests.system.admin_overlay.test_system_autogen" + +if os.getenv(BIGTABLE_EMULATOR): + pytest.skip( + allow_module_level=True, + reason="Emulator support for admin client tests unsupported.", + ) + + +@CrossSync.convert +@CrossSync.pytest_fixture(scope="session") +async def data_client(admin_overlay_project_id): + async with CrossSync.DataClient(project=admin_overlay_project_id) as client: + yield client + + +@CrossSync.convert( + replace_symbols={"BigtableTableAdminAsyncClient": "BigtableTableAdminClient"} +) +@CrossSync.pytest_fixture(scope="session") +async def table_admin_client(admin_overlay_project_id): + async with admin_v2.BigtableTableAdminAsyncClient( + client_options={ + "quota_project_id": admin_overlay_project_id, + } + ) as client: + yield client + + +@CrossSync.convert( + replace_symbols={"BigtableInstanceAdminAsyncClient": "BigtableInstanceAdminClient"} +) +@CrossSync.pytest_fixture(scope="session") +async def instance_admin_client(admin_overlay_project_id): + async with admin_v2.BigtableInstanceAdminAsyncClient( + client_options={ + "quota_project_id": admin_overlay_project_id, + } + ) as client: + yield client + + +@CrossSync.convert +@CrossSync.pytest_fixture(scope="session") +async def instances_to_delete(instance_admin_client): + instances = [] + + try: + yield instances + finally: + for instance in instances: + await instance_admin_client.delete_instance(name=instance.name) + + +@CrossSync.convert +@CrossSync.pytest_fixture(scope="session") +async def backups_to_delete(table_admin_client): + backups = [] + + try: + yield backups + finally: + for backup in backups: + await table_admin_client.delete_backup(name=backup.name) + + +@CrossSync.convert +async def create_instance( + instance_admin_client, + table_admin_client, + data_client, + project_id, + instances_to_delete, + storage_type=admin_v2.StorageType.HDD, + cluster_locations=DEFAULT_CLUSTER_LOCATIONS, +) -> Tuple[admin_v2.Instance, admin_v2.Table]: + """ + Creates a new Bigtable instance with the specified project_id, storage type, and cluster locations. + + After creating the Bigtable instance, it will create a test table and populate it with dummy data. + This is not defined as a fixture because the different system tests need different kinds of instances. 
+ """ + # Create the instance + clusters = {} + + instance_id = generate_unique_suffix(INSTANCE_PREFIX) + + for idx, location in enumerate(cluster_locations): + clusters[location] = admin_v2.Cluster( + name=instance_admin_client.cluster_path( + project_id, instance_id, f"{instance_id}-{idx}" + ), + location=instance_admin_client.common_location_path(project_id, location), + default_storage_type=storage_type, + ) + + # Instance and cluster creation are currently unsupported in the Bigtable emulator + if os.getenv(BIGTABLE_EMULATOR): + # All we need for system tests so far is the instance name. + instance = admin_v2.Instance( + name=instance_admin_client.instance_path(project_id, instance_id), + ) + else: + create_instance_request = admin_v2.CreateInstanceRequest( + parent=instance_admin_client.common_project_path(project_id), + instance_id=instance_id, + instance=admin_v2.Instance( + display_name=instance_id[ + :30 + ], # truncate to 30 characters because of character limit + ), + clusters=clusters, + ) + operation = await instance_admin_client.create_instance(create_instance_request) + instance = await operation.result() + + instances_to_delete.append(instance) + + # Create a table within the instance + create_table_request = admin_v2.CreateTableRequest( + parent=instance_admin_client.instance_path(project_id, instance_id), + table_id=TEST_TABLE_NAME, + table=admin_v2.Table( + column_families={ + TEST_COLUMMN_FAMILY_NAME: admin_v2.ColumnFamily(), + } + ), + ) + + table = await table_admin_client.create_table(create_table_request) + + # Populate with dummy data + await populate_table( + table_admin_client, data_client, instance, table, INITIAL_CELL_VALUE + ) + + return instance, table + + +@CrossSync.convert +async def populate_table(table_admin_client, data_client, instance, table, cell_value): + """ + Populates all the test cells in the given table with the given cell value. + + This is used to populate test data when creating an instance, and for testing the + wait_for_consistency call. + """ + data_client_table = data_client.get_table( + table_admin_client.parse_instance_path(instance.name)["instance"], + table_admin_client.parse_table_path(table.name)["table"], + ) + row_mutation_entries = [] + for i in range(0, NUM_ROWS): + row_mutation_entries.append( + mutations.RowMutationEntry( + row_key=f"{ROW_PREFIX}-{i}", + mutations=[ + mutations.SetCell( + family=TEST_COLUMMN_FAMILY_NAME, + qualifier=TEST_COLUMN_NAME, + new_value=cell_value, + timestamp_micros=-1, + ) + ], + ) + ) + + await data_client_table.bulk_mutate_rows(row_mutation_entries) + + +@CrossSync.convert +async def create_backup( + instance_admin_client, table_admin_client, instance, table, backups_to_delete +) -> admin_v2.Backup: + """ + Creates a backup of the given table under the given instance. + + This will be restored to a different instance later on, to test + optimize_restored_table. 
+
+    """
+    # Get a cluster in the instance for the backup
+    list_clusters_response = await instance_admin_client.list_clusters(
+        parent=instance.name
+    )
+    cluster_name = list_clusters_response.clusters[0].name
+
+    backup_id = generate_unique_suffix(BACKUP_PREFIX)
+
+    # Create the backup
+    operation = await table_admin_client.create_backup(
+        admin_v2.CreateBackupRequest(
+            parent=cluster_name,
+            backup_id=backup_id,
+            backup=admin_v2.Backup(
+                name=f"{cluster_name}/backups/{backup_id}",
+                source_table=table.name,
+                expire_time=datetime.now() + timedelta(hours=7),
+            ),
+        )
+    )
+
+    backup = await operation.result()
+    backups_to_delete.append(backup)
+    return backup
+
+
+@CrossSync.convert
+async def assert_table_cell_value_equal_to(
+    table_admin_client, data_client, instance, table, value
+):
+    """
+    Asserts that all cells in the given table have the given value.
+    """
+    data_client_table = data_client.get_table(
+        table_admin_client.parse_instance_path(instance.name)["instance"],
+        table_admin_client.parse_table_path(table.name)["table"],
+    )
+
+    # Read all the rows; there shouldn't be that many of them
+    query = read_rows_query.ReadRowsQuery(limit=NUM_ROWS)
+    async for row in await data_client_table.read_rows_stream(query):
+        latest_cell = row[TEST_COLUMMN_FAMILY_NAME, TEST_COLUMN_NAME][0]
+        assert latest_cell.value.decode("utf-8") == value
+
+
+@CrossSync.convert(
+    replace_symbols={
+        "AsyncRestoreTableOperation": "RestoreTableOperation",
+        "AsyncOperation": "Operation",
+    }
+)
+@CrossSync.pytest
+@pytest.mark.skipif(
+    os.getenv(BIGTABLE_EMULATOR),
+    reason="Backups are not supported in the Bigtable emulator",
+)
+@pytest.mark.parametrize(
+    "second_instance_storage_type,expect_optimize_operation",
+    [
+        (admin_v2.StorageType.HDD, False),
+        (admin_v2.StorageType.SSD, True),
+    ],
+)
+async def test_optimize_restored_table(
+    admin_overlay_project_id,
+    instance_admin_client,
+    table_admin_client,
+    data_client,
+    instances_to_delete,
+    backups_to_delete,
+    second_instance_storage_type,
+    expect_optimize_operation,
+):
+    # Create two instances. We back up a table from the first instance to a new table in the
+    # second instance. 
This is to test whether or not different scenarios trigger an + # optimize_restored_table operation + instance_with_backup, table_to_backup = await create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + admin_v2.StorageType.HDD, + ) + + instance_to_restore, _ = await create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + second_instance_storage_type, + ) + + backup = await create_backup( + instance_admin_client, + table_admin_client, + instance_with_backup, + table_to_backup, + backups_to_delete, + ) + + # Restore to other instance + restore_operation = await table_admin_client.restore_table( + admin_v2.RestoreTableRequest( + parent=instance_to_restore.name, + table_id=TEST_BACKUP_TABLE_NAME, + backup=backup.name, + ) + ) + + assert isinstance(restore_operation, admin_v2.AsyncRestoreTableOperation) + restored_table = await restore_operation.result() + + optimize_operation = await restore_operation.optimize_restored_table_operation() + if expect_optimize_operation: + assert isinstance(optimize_operation, api_core_operation.AsyncOperation) + await optimize_operation.result() + else: + assert optimize_operation is None + + # Test that the new table exists + assert ( + restored_table.name + == f"{instance_to_restore.name}/tables/{TEST_BACKUP_TABLE_NAME}" + ) + await assert_table_cell_value_equal_to( + table_admin_client, + data_client, + instance_to_restore, + restored_table, + INITIAL_CELL_VALUE, + ) + + +@CrossSync.pytest +async def test_wait_for_consistency( + instance_admin_client, + table_admin_client, + data_client, + instances_to_delete, + admin_overlay_project_id, +): + # Create an instance and a table, then try to write NEW_CELL_VALUE + # to each table row instead of INITIAL_CELL_VALUE. + instance, table = await create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + cluster_locations=REPLICATION_CLUSTER_LOCATIONS, + ) + + await populate_table( + table_admin_client, data_client, instance, table, NEW_CELL_VALUE + ) + + wait_for_consistency_request = admin_v2.WaitForConsistencyRequest( + name=table.name, + standard_read_remote_writes=admin_v2.StandardReadRemoteWrites(), + ) + await table_admin_client.wait_for_consistency(wait_for_consistency_request) + await assert_table_cell_value_equal_to( + table_admin_client, data_client, instance, table, NEW_CELL_VALUE + ) diff --git a/tests/system/admin_overlay/test_system_autogen.py b/tests/system/admin_overlay/test_system_autogen.py new file mode 100644 index 000000000..4fde3571f --- /dev/null +++ b/tests/system/admin_overlay/test_system_autogen.py @@ -0,0 +1,300 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
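+# It is produced from the async source module whose __CROSS_SYNC_OUTPUT__
+# points at this path; regenerate it with `nox -s generate_sync` rather than
+# editing it by hand.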
+ +from typing import Tuple +from google.cloud import bigtable_admin_v2 as admin_v2 +from google.cloud.bigtable.data._cross_sync import CrossSync +from google.cloud.bigtable.data import mutations, read_rows_query +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from .conftest import ( + INSTANCE_PREFIX, + BACKUP_PREFIX, + ROW_PREFIX, + DEFAULT_CLUSTER_LOCATIONS, + REPLICATION_CLUSTER_LOCATIONS, + TEST_TABLE_NAME, + TEST_BACKUP_TABLE_NAME, + TEST_COLUMMN_FAMILY_NAME, + TEST_COLUMN_NAME, + NUM_ROWS, + INITIAL_CELL_VALUE, + NEW_CELL_VALUE, + generate_unique_suffix, +) +from datetime import datetime, timedelta +import pytest +import os +from google.api_core import operation as api_core_operation + +if os.getenv(BIGTABLE_EMULATOR): + pytest.skip( + allow_module_level=True, + reason="Emulator support for admin client tests unsupported.", + ) + + +@pytest.fixture(scope="session") +def data_client(admin_overlay_project_id): + with CrossSync._Sync_Impl.DataClient(project=admin_overlay_project_id) as client: + yield client + + +@pytest.fixture(scope="session") +def table_admin_client(admin_overlay_project_id): + with admin_v2.BigtableTableAdminClient( + client_options={"quota_project_id": admin_overlay_project_id} + ) as client: + yield client + + +@pytest.fixture(scope="session") +def instance_admin_client(admin_overlay_project_id): + with admin_v2.BigtableInstanceAdminClient( + client_options={"quota_project_id": admin_overlay_project_id} + ) as client: + yield client + + +@pytest.fixture(scope="session") +def instances_to_delete(instance_admin_client): + instances = [] + try: + yield instances + finally: + for instance in instances: + instance_admin_client.delete_instance(name=instance.name) + + +@pytest.fixture(scope="session") +def backups_to_delete(table_admin_client): + backups = [] + try: + yield backups + finally: + for backup in backups: + table_admin_client.delete_backup(name=backup.name) + + +def create_instance( + instance_admin_client, + table_admin_client, + data_client, + project_id, + instances_to_delete, + storage_type=admin_v2.StorageType.HDD, + cluster_locations=DEFAULT_CLUSTER_LOCATIONS, +) -> Tuple[admin_v2.Instance, admin_v2.Table]: + """Creates a new Bigtable instance with the specified project_id, storage type, and cluster locations. + + After creating the Bigtable instance, it will create a test table and populate it with dummy data. + This is not defined as a fixture because the different system tests need different kinds of instances. 
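+
+    Illustrative usage (a sketch; the sync counterpart of the example in the
+    async source module):
+
+        instance, table = create_instance(
+            instance_admin_client,
+            table_admin_client,
+            data_client,
+            admin_overlay_project_id,
+            instances_to_delete,
+            admin_v2.StorageType.SSD,
+        )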
+ """ + clusters = {} + instance_id = generate_unique_suffix(INSTANCE_PREFIX) + for idx, location in enumerate(cluster_locations): + clusters[location] = admin_v2.Cluster( + name=instance_admin_client.cluster_path( + project_id, instance_id, f"{instance_id}-{idx}" + ), + location=instance_admin_client.common_location_path(project_id, location), + default_storage_type=storage_type, + ) + if os.getenv(BIGTABLE_EMULATOR): + instance = admin_v2.Instance( + name=instance_admin_client.instance_path(project_id, instance_id) + ) + else: + create_instance_request = admin_v2.CreateInstanceRequest( + parent=instance_admin_client.common_project_path(project_id), + instance_id=instance_id, + instance=admin_v2.Instance(display_name=instance_id[:30]), + clusters=clusters, + ) + operation = instance_admin_client.create_instance(create_instance_request) + instance = operation.result() + instances_to_delete.append(instance) + create_table_request = admin_v2.CreateTableRequest( + parent=instance_admin_client.instance_path(project_id, instance_id), + table_id=TEST_TABLE_NAME, + table=admin_v2.Table( + column_families={TEST_COLUMMN_FAMILY_NAME: admin_v2.ColumnFamily()} + ), + ) + table = table_admin_client.create_table(create_table_request) + populate_table(table_admin_client, data_client, instance, table, INITIAL_CELL_VALUE) + return (instance, table) + + +def populate_table(table_admin_client, data_client, instance, table, cell_value): + """Populates all the test cells in the given table with the given cell value. + + This is used to populate test data when creating an instance, and for testing the + wait_for_consistency call.""" + data_client_table = data_client.get_table( + table_admin_client.parse_instance_path(instance.name)["instance"], + table_admin_client.parse_table_path(table.name)["table"], + ) + row_mutation_entries = [] + for i in range(0, NUM_ROWS): + row_mutation_entries.append( + mutations.RowMutationEntry( + row_key=f"{ROW_PREFIX}-{i}", + mutations=[ + mutations.SetCell( + family=TEST_COLUMMN_FAMILY_NAME, + qualifier=TEST_COLUMN_NAME, + new_value=cell_value, + timestamp_micros=-1, + ) + ], + ) + ) + data_client_table.bulk_mutate_rows(row_mutation_entries) + + +def create_backup( + instance_admin_client, table_admin_client, instance, table, backups_to_delete +) -> admin_v2.Backup: + """Creates a backup of the given table under the given instance. 
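+
+    Illustrative usage (a sketch; the sync counterpart of the example in the
+    async source module):
+
+        backup = create_backup(
+            instance_admin_client,
+            table_admin_client,
+            instance_with_backup,
+            table_to_backup,
+            backups_to_delete,
+        )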
+ + This will be restored to a different instance later on, to test + optimize_restored_table.""" + list_clusters_response = instance_admin_client.list_clusters(parent=instance.name) + cluster_name = list_clusters_response.clusters[0].name + backup_id = generate_unique_suffix(BACKUP_PREFIX) + operation = table_admin_client.create_backup( + admin_v2.CreateBackupRequest( + parent=cluster_name, + backup_id=backup_id, + backup=admin_v2.Backup( + name=f"{cluster_name}/backups/{backup_id}", + source_table=table.name, + expire_time=datetime.now() + timedelta(hours=7), + ), + ) + ) + backup = operation.result() + backups_to_delete.append(backup) + return backup + + +def assert_table_cell_value_equal_to( + table_admin_client, data_client, instance, table, value +): + """Asserts that all cells in the given table have the given value.""" + data_client_table = data_client.get_table( + table_admin_client.parse_instance_path(instance.name)["instance"], + table_admin_client.parse_table_path(table.name)["table"], + ) + query = read_rows_query.ReadRowsQuery(limit=NUM_ROWS) + for row in data_client_table.read_rows_stream(query): + latest_cell = row[TEST_COLUMMN_FAMILY_NAME, TEST_COLUMN_NAME][0] + assert latest_cell.value.decode("utf-8") == value + + +@pytest.mark.skipif( + os.getenv(BIGTABLE_EMULATOR), + reason="Backups are not supported in the Bigtable emulator", +) +@pytest.mark.parametrize( + "second_instance_storage_type,expect_optimize_operation", + [(admin_v2.StorageType.HDD, False), (admin_v2.StorageType.SSD, True)], +) +def test_optimize_restored_table( + admin_overlay_project_id, + instance_admin_client, + table_admin_client, + data_client, + instances_to_delete, + backups_to_delete, + second_instance_storage_type, + expect_optimize_operation, +): + (instance_with_backup, table_to_backup) = create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + admin_v2.StorageType.HDD, + ) + (instance_to_restore, _) = create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + second_instance_storage_type, + ) + backup = create_backup( + instance_admin_client, + table_admin_client, + instance_with_backup, + table_to_backup, + backups_to_delete, + ) + restore_operation = table_admin_client.restore_table( + admin_v2.RestoreTableRequest( + parent=instance_to_restore.name, + table_id=TEST_BACKUP_TABLE_NAME, + backup=backup.name, + ) + ) + assert isinstance(restore_operation, admin_v2.RestoreTableOperation) + restored_table = restore_operation.result() + optimize_operation = restore_operation.optimize_restored_table_operation() + if expect_optimize_operation: + assert isinstance(optimize_operation, api_core_operation.Operation) + optimize_operation.result() + else: + assert optimize_operation is None + assert ( + restored_table.name + == f"{instance_to_restore.name}/tables/{TEST_BACKUP_TABLE_NAME}" + ) + assert_table_cell_value_equal_to( + table_admin_client, + data_client, + instance_to_restore, + restored_table, + INITIAL_CELL_VALUE, + ) + + +def test_wait_for_consistency( + instance_admin_client, + table_admin_client, + data_client, + instances_to_delete, + admin_overlay_project_id, +): + (instance, table) = create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + cluster_locations=REPLICATION_CLUSTER_LOCATIONS, + ) + populate_table(table_admin_client, data_client, instance, table, 
NEW_CELL_VALUE) + wait_for_consistency_request = admin_v2.WaitForConsistencyRequest( + name=table.name, standard_read_remote_writes=admin_v2.StandardReadRemoteWrites() + ) + table_admin_client.wait_for_consistency(wait_for_consistency_request) + assert_table_cell_value_equal_to( + table_admin_client, data_client, instance, table, NEW_CELL_VALUE + ) diff --git a/tests/system/conftest.py b/tests/system/conftest.py index f39fcba88..8c0eb30b1 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -1,4 +1,4 @@ -# Copyright 2011 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,199 +11,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +""" +Import pytest fixtures for setting up table for data client system tests +""" +import sys import os import pytest -from test_utils.system import unique_resource_id - -from google.cloud.bigtable.client import Client -from google.cloud.environment_vars import BIGTABLE_EMULATOR - -from . import _helpers - - -@pytest.fixture(scope="session") -def in_emulator(): - return os.getenv(BIGTABLE_EMULATOR) is not None - - -@pytest.fixture(scope="session") -def kms_key_name(): - return os.getenv("KMS_KEY_NAME") - - -@pytest.fixture(scope="session") -def with_kms_key_name(kms_key_name): - if kms_key_name is None: - pytest.skip("Test requires KMS_KEY_NAME environment variable") - return kms_key_name - - -@pytest.fixture(scope="session") -def skip_on_emulator(in_emulator): - if in_emulator: - pytest.skip("Emulator does not support this feature") - - -@pytest.fixture(scope="session") -def unique_suffix(): - return unique_resource_id("-") - - -@pytest.fixture(scope="session") -def location_id(): - return "us-central1-c" - - -@pytest.fixture(scope="session") -def serve_nodes(): - return 3 - - -@pytest.fixture(scope="session") -def label_key(): - return "python-system" - - -@pytest.fixture(scope="session") -def instance_labels(label_key): - return {label_key: _helpers.label_stamp()} - - -@pytest.fixture(scope="session") -def admin_client(): - return Client(admin=True) - - -@pytest.fixture(scope="session") -def service_account(admin_client): - from google.oauth2.service_account import Credentials - - if not isinstance(admin_client._credentials, Credentials): - pytest.skip("These tests require a service account credential") - return admin_client._credentials - - -@pytest.fixture(scope="session") -def admin_instance_id(unique_suffix): - return f"g-c-p{unique_suffix}" +import asyncio +script_path = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(script_path) -@pytest.fixture(scope="session") -def admin_cluster_id(admin_instance_id): - return f"{admin_instance_id}-cluster" - - -@pytest.fixture(scope="session") -def admin_instance(admin_client, admin_instance_id, instance_labels): - return admin_client.instance(admin_instance_id, labels=instance_labels) - - -@pytest.fixture(scope="session") -def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): - return admin_instance.cluster( - admin_cluster_id, - location_id=location_id, - serve_nodes=serve_nodes, - ) - - -@pytest.fixture(scope="session") -def admin_cluster_with_autoscaling( - admin_instance, - admin_cluster_id, - location_id, - min_serve_nodes, - max_serve_nodes, - cpu_utilization_percent, -): - return 
admin_instance.cluster( - admin_cluster_id, - location_id=location_id, - min_serve_nodes=min_serve_nodes, - max_serve_nodes=max_serve_nodes, - cpu_utilization_percent=cpu_utilization_percent, - ) - - -@pytest.fixture(scope="session") -def admin_instance_populated(admin_instance, admin_cluster, in_emulator): - # Emulator does not support instance admin operations (create / delete). - # See: https://cloud.google.com/bigtable/docs/emulator - if not in_emulator: - operation = admin_instance.create(clusters=[admin_cluster]) - operation.result(timeout=240) - - yield admin_instance - - if not in_emulator: - _helpers.retry_429(admin_instance.delete)() - - -@pytest.fixture(scope="session") -def data_client(): - return Client(admin=False) - - -@pytest.fixture(scope="session") -def data_instance_id(unique_suffix): - return f"g-c-p-d{unique_suffix}" - - -@pytest.fixture(scope="session") -def data_cluster_id(data_instance_id): - return f"{data_instance_id}-cluster" - - -@pytest.fixture(scope="session") -def data_instance_populated( - admin_client, - data_instance_id, - instance_labels, - data_cluster_id, - location_id, - serve_nodes, - in_emulator, -): - instance = admin_client.instance(data_instance_id, labels=instance_labels) - # Emulator does not support instance admin operations (create / delete). - # See: https://cloud.google.com/bigtable/docs/emulator - if not in_emulator: - cluster = instance.cluster( - data_cluster_id, - location_id=location_id, - serve_nodes=serve_nodes, - ) - operation = instance.create(clusters=[cluster]) - operation.result(timeout=240) - - yield instance - - if not in_emulator: - _helpers.retry_429(instance.delete)() - - -@pytest.fixture(scope="function") -def instances_to_delete(): - instances_to_delete = [] - - yield instances_to_delete - - for instance in instances_to_delete: - _helpers.retry_429(instance.delete)() - - -@pytest.fixture(scope="session") -def min_serve_nodes(in_emulator): - return 1 - - -@pytest.fixture(scope="session") -def max_serve_nodes(in_emulator): - return 8 +pytest_plugins = [ + "data.setup_fixtures", +] @pytest.fixture(scope="session") -def cpu_utilization_percent(in_emulator): - return 10 +def event_loop(): + loop = asyncio.new_event_loop() + yield loop + loop.stop() + loop.close() diff --git a/tests/system/cross_sync/test_cases/async_to_sync.yaml b/tests/system/cross_sync/test_cases/async_to_sync.yaml new file mode 100644 index 000000000..99d39cbc5 --- /dev/null +++ b/tests/system/cross_sync/test_cases/async_to_sync.yaml @@ -0,0 +1,76 @@ +tests: + - description: "async for loop fn" + before: | + async def func_name(): + async for i in range(10): + await routine() + return 42 + transformers: [AsyncToSync] + after: | + def func_name(): + for i in range(10): + routine() + return 42 + + - description: "async with statement" + before: | + async def func_name(): + async with context_manager() as cm: + await do_something(cm) + transformers: [AsyncToSync] + after: | + def func_name(): + with context_manager() as cm: + do_something(cm) + + - description: "async function definition" + before: | + async def async_function(param1, param2): + result = await some_coroutine() + return result + transformers: [AsyncToSync] + after: | + def async_function(param1, param2): + result = some_coroutine() + return result + + - description: "list comprehension with async for" + before: | + async def func_name(): + result = [x async for x in aiter() if await predicate(x)] + transformers: [AsyncToSync] + after: | + def func_name(): + result = [x for x in aiter() if 
predicate(x)] + + - description: "multiple async features in one function" + before: | + async def complex_function(): + async with resource_manager() as res: + async for item in res.items(): + if await check(item): + yield await process(item) + transformers: [AsyncToSync] + after: | + def complex_function(): + with resource_manager() as res: + for item in res.items(): + if check(item): + yield process(item) + + - description: "nested async constructs" + before: | + async def nested_async(): + async with outer_context(): + async for x in outer_iter(): + async with inner_context(x): + async for y in inner_iter(x): + await process(x, y) + transformers: [AsyncToSync] + after: | + def nested_async(): + with outer_context(): + for x in outer_iter(): + with inner_context(x): + for y in inner_iter(x): + process(x, y) diff --git a/tests/system/cross_sync/test_cases/cross_sync_files.yaml b/tests/system/cross_sync/test_cases/cross_sync_files.yaml new file mode 100644 index 000000000..5666325ce --- /dev/null +++ b/tests/system/cross_sync/test_cases/cross_sync_files.yaml @@ -0,0 +1,469 @@ +tests: + - description: "No output annotation" + before: | + class MyAsyncClass: + async def my_method(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: null + + - description: "CrossSync.convert_class with default sync_name" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class + class MyClass: + async def my_method(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + async def my_method(self): + pass + + - description: "CrossSync.convert_class with custom sync_name" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass") + class MyAsyncClass: + async def my_method(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + async def my_method(self): + pass + + - description: "CrossSync.convert_class with replace_symbols" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class( + sync_name="MyClass", + replace_symbols={"AsyncBase": "SyncBase", "ParentA": "ParentB"} + ) + class MyAsyncClass(ParentA): + def __init__(self, base: AsyncBase): + self.base = base + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass(ParentB): + + def __init__(self, base: SyncBase): + self.base = base + + - description: "CrossSync.convert_class with docstring formatting" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class( + sync_name="MyClass", + docstring_format_vars={"type": ("async", "sync")} + ) + class MyAsyncClass: + """This is a {type} class.""" + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + """This is a sync class.""" + + - description: "CrossSync.convert_class with multiple decorators and methods" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass") + @some_other_decorator + class MyAsyncClass: + @CrossSync.convert(rm_aio=False) + async def my_method(self): + async with self.base.connection(): + return await self.base.my_method() + + @CrossSync.drop + async def async_only_method(self): + await self.async_operation() + + def sync_method(self): + return "This method stays the same" + + @CrossSync.pytest_fixture + def fixture(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: | + @some_other_decorator + class MyClass: + + def my_method(self): + async with self.base.connection(): + return await 
self.base.my_method() + + def sync_method(self): + return "This method stays the same" + + @pytest.fixture() + def fixture(self): + pass + + - description: "CrossSync.convert_class with nested classes drop" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass") + class MyAsyncClass: + @CrossSync.drop + class NestedAsyncClass: + async def nested_method(self, base: AsyncBase): + pass + + @CrossSync.convert + async def use_nested(self): + nested = self.NestedAsyncClass() + CrossSync.rm_aio(await nested.nested_method()) + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + def use_nested(self): + nested = self.NestedAsyncClass() + nested.nested_method() + + - description: "CrossSync.convert_class with nested classes explicit" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass", replace_symbols={"AsyncBase": "SyncBase"}) + class MyAsyncClass: + @CrossSync.convert_class + class NestedClass: + async def nested_method(self, base: AsyncBase): + pass + + @CrossSync.convert + async def use_nested(self): + nested = self.NestedAsyncClass() + CrossSync.rm_aio(await nested.nested_method()) + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + class NestedClass: + + async def nested_method(self, base: SyncBase): + pass + + def use_nested(self): + nested = self.NestedAsyncClass() + nested.nested_method() + + - description: "CrossSync.convert_class with nested classes implicit" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass", replace_symbols={"AsyncBase": "SyncBase"}) + class MyAsyncClass: + + class NestedClass: + async def nested_method(self, base: AsyncBase): + pass + + @CrossSync.convert + async def use_nested(self): + nested = self.NestedAsyncClass() + CrossSync.rm_aio(await nested.nested_method()) + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + class NestedClass: + + async def nested_method(self, base: SyncBase): + pass + + def use_nested(self): + nested = self.NestedAsyncClass() + nested.nested_method() + + - description: "CrossSync.convert_class with add_mapping" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class( + sync_name="MyClass", + add_mapping_for_name="MyClass" + ) + class MyAsyncClass: + async def my_method(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: | + @CrossSync._Sync_Impl.add_mapping_decorator("MyClass") + class MyClass: + + async def my_method(self): + pass + + - description: "CrossSync.convert_class with rm_aio" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(rm_aio=True) + class MyClass: + async def my_method(self): + async for item in self.items: + await self.process(item) + transformers: [CrossSyncFileProcessor] + after: | + class MyClass: + + def my_method(self): + for item in self.items: + self.process(item) + + - description: "CrossSync.convert_class with CrossSync calls" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass") + class MyAsyncClass: + @CrossSync.convert + async def my_method(self): + async with CrossSync.rm_aio(CrossSync.Condition()) as c: + CrossSync.rm_aio(await CrossSync.yield_to_event_loop()) + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + def my_method(self): + with CrossSync._Sync_Impl.Condition() as c: + CrossSync._Sync_Impl.yield_to_event_loop() + + - description: "Convert async 
method with @CrossSync.convert" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert + async def my_method(self, arg): + pass + transformers: [CrossSyncFileProcessor] + after: | + def my_method(self, arg): + pass + + - description: "Convert async method with custom sync name" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert(sync_name="sync_method") + async def async_method(self, arg): + return await self.helper(arg) + transformers: [CrossSyncFileProcessor] + after: | + def sync_method(self, arg): + return self.helper(arg) + + - description: "Convert async method with rm_aio=True" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert(rm_aio=True) + async def async_method(self): + async with self.lock: + async for item in self.items: + await self.process(item) + transformers: [CrossSyncFileProcessor] + after: | + def async_method(self): + with self.lock: + for item in self.items: + self.process(item) + + - description: "Drop method from sync version" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + def keep_method(self): + pass + + @CrossSync.drop + async def async_only_method(self): + await self.async_operation() + transformers: [CrossSyncFileProcessor] + after: | + def keep_method(self): + pass + + - description: "Drop class from sync version" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.drop + class DropMe: + pass + class Keeper: + pass + transformers: [CrossSyncFileProcessor] + after: | + class Keeper: + pass + + - description: "Convert.pytest" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.pytest + async def test_async_function(): + result = await async_operation() + assert result == expected_value + transformers: [CrossSyncFileProcessor] + after: | + def test_async_function(): + result = async_operation() + assert result == expected_value + + - description: "CrossSync.pytest with rm_aio=False" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.pytest(rm_aio=False) + async def test_partial_async(): + async with context_manager(): + result = await async_function() + assert result == expected_value + transformers: [CrossSyncFileProcessor] + after: | + def test_partial_async(): + async with context_manager(): + result = await async_function() + assert result == expected_value + + - description: "Convert async pytest fixture" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.pytest_fixture + @CrossSync.convert(rm_aio=True) + async def my_fixture(): + resource = await setup_resource() + yield resource + await cleanup_resource(resource) + transformers: [CrossSyncFileProcessor] + after: | + @pytest.fixture() + def my_fixture(): + resource = setup_resource() + yield resource + cleanup_resource(resource) + + - description: "Convert pytest fixture with custom parameters" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.pytest_fixture(scope="module", autouse=True) + def my_fixture(): + resource = setup_resource() + yield resource + cleanup_resource(resource) + transformers: [CrossSyncFileProcessor] + after: | + @pytest.fixture(scope="module", autouse=True) + def my_fixture(): + resource = setup_resource() + yield resource + cleanup_resource(resource) + + - description: "Convert method with multiple stacked decorators" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert(sync_name="sync_multi_decorated") + @CrossSync.pytest + @some_other_decorator + async def async_multi_decorated(self, arg): + result = await self.async_operation(arg) + return result + 
transformers: [CrossSyncFileProcessor] + after: | + @some_other_decorator + def sync_multi_decorated(self, arg): + result = self.async_operation(arg) + return result + + - description: "Convert method with multiple stacked decorators in class" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class + class MyClass: + @CrossSync.convert(sync_name="sync_multi_decorated") + @CrossSync.pytest + @some_other_decorator + async def async_multi_decorated(self, arg): + result = await self.async_operation(arg) + return result + transformers: [CrossSyncFileProcessor] + after: | + class MyClass: + + @some_other_decorator + def sync_multi_decorated(self, arg): + result = self.async_operation(arg) + return result + + - description: "Convert method with stacked decorators including rm_aio" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert(rm_aio=True) + @CrossSync.pytest_fixture(scope="function") + @another_decorator + async def async_fixture_with_context(): + async with some_async_context(): + resource = await setup_async_resource() + yield resource + await cleanup_async_resource(resource) + transformers: [CrossSyncFileProcessor] + after: | + @pytest.fixture(scope="function") + @another_decorator + def async_fixture_with_context(): + with some_async_context(): + resource = setup_async_resource() + yield resource + cleanup_async_resource(resource) + + - description: "Handle CrossSync.is_async conditional" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + if CrossSync.is_async: + import a + else: + import b + + def my_method(self): + if CrossSync.is_async: + return "async version" + else: + return "sync version" + transformers: [CrossSyncFileProcessor] + after: | + import b + + def my_method(self): + return "sync version" + + - description: "Replace CrossSync symbols" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + CrossSync.sleep(1) + @CrossSync.convert_class + class MyClass: + event = CrossSync.Event() + def my_method(self): + return CrossSync.some_function() + transformers: [CrossSyncFileProcessor] + after: | + CrossSync._Sync_Impl.sleep(1) + class MyClass: + event = CrossSync._Sync_Impl.Event() + def my_method(self): + return CrossSync._Sync_Impl.some_function() diff --git a/tests/system/cross_sync/test_cases/rm_aio.yaml b/tests/system/cross_sync/test_cases/rm_aio.yaml new file mode 100644 index 000000000..89acda630 --- /dev/null +++ b/tests/system/cross_sync/test_cases/rm_aio.yaml @@ -0,0 +1,109 @@ +tests: + - description: "remove await" + before: | + CrossSync.rm_aio(await routine()) + transformers: [RmAioFunctions] + after: | + routine() + - description: "async for loop fn" + before: | + async def func_name(): + async for i in CrossSync.rm_aio(range(10)): + await routine() + return 42 + transformers: [RmAioFunctions] + after: | + async def func_name(): + for i in range(10): + await routine() + return 42 + + - description: "async with statement" + before: | + async def func_name(): + async with CrossSync.rm_aio(context_manager()) as cm: + await do_something(cm) + transformers: [RmAioFunctions] + after: | + async def func_name(): + with context_manager() as cm: + await do_something(cm) + + - description: "list comprehension with async for" + before: | + async def func_name(): + result = CrossSync.rm_aio([x async for x in aiter() if await predicate(x)]) + transformers: [RmAioFunctions] + after: | + async def func_name(): + result = [x for x in aiter() if predicate(x)] + + - description: "multiple async features in one call" + before: | + CrossSync.rm_aio([x 
async for x in aiter() if await predicate(x)] + await routine()) + transformers: [RmAioFunctions] + after: | + [x for x in aiter() if predicate(x)] + routine() + + - description: "do nothing with no CrossSync.rm_aio" + before: | + async def nested_async(): + async with outer_context(): + async for x in outer_iter(): + async with inner_context(x): + async for y in inner_iter(x): + await process(x, y) + transformers: [RmAioFunctions] + after: | + async def nested_async(): + async with outer_context(): + async for x in outer_iter(): + async with inner_context(x): + async for y in inner_iter(x): + await process(x, y) + + - description: "nested async for loops with rm_aio" + before: | + async def nested_loops(): + async for x in CrossSync.rm_aio(outer_iter()): + async for y in CrossSync.rm_aio(inner_iter(x)): + await process(x, y) + transformers: [RmAioFunctions] + after: | + async def nested_loops(): + for x in outer_iter(): + for y in inner_iter(x): + await process(x, y) + + - description: "async generator function with rm_aio" + before: | + async def async_gen(): + yield CrossSync.rm_aio(await async_value()) + async for item in CrossSync.rm_aio(async_iterator()): + yield item + transformers: [RmAioFunctions] + after: | + async def async_gen(): + yield async_value() + for item in async_iterator(): + yield item + + - description: "async with statement with multiple context managers" + before: | + async def multi_context(): + async with CrossSync.rm_aio(cm1()), CrossSync.rm_aio(cm2()) as c2, CrossSync.rm_aio(cm3()) as c3: + await do_something(c2, c3) + transformers: [RmAioFunctions] + after: | + async def multi_context(): + with cm1(), cm2() as c2, cm3() as c3: + await do_something(c2, c3) + + - description: "async comprehension with multiple async for and if clauses" + before: | + async def complex_comprehension(): + result = CrossSync.rm_aio([x async for x in aiter1() if await pred1(x) async for y in aiter2(x) if await pred2(y)]) + transformers: [RmAioFunctions] + after: | + async def complex_comprehension(): + result = [x for x in aiter1() if pred1(x) for y in aiter2(x) if pred2(y)] diff --git a/tests/system/cross_sync/test_cases/strip_async_conditional_branches.yaml b/tests/system/cross_sync/test_cases/strip_async_conditional_branches.yaml new file mode 100644 index 000000000..0c192fb37 --- /dev/null +++ b/tests/system/cross_sync/test_cases/strip_async_conditional_branches.yaml @@ -0,0 +1,74 @@ +tests: + - description: "top level conditional" + before: | + if CrossSync.is_async: + print("async") + else: + print("sync") + transformers: [StripAsyncConditionalBranches] + after: | + print("sync") + - description: "nested conditional" + before: | + if CrossSync.is_async: + print("async") + else: + print("hello") + if CrossSync.is_async: + print("async") + else: + print("world") + transformers: [StripAsyncConditionalBranches] + after: | + print("hello") + print("world") + - description: "conditional within class" + before: | + class MyClass: + def my_method(self): + if CrossSync.is_async: + return "async result" + else: + return "sync result" + transformers: [StripAsyncConditionalBranches] + after: | + class MyClass: + + def my_method(self): + return "sync result" + - description: "multiple branches" + before: | + if CrossSync.is_async: + print("async branch 1") + elif some_condition: + print("other condition") + elif CrossSync.is_async: + print("async branch 2") + else: + print("sync branch") + transformers: [StripAsyncConditionalBranches] + after: | + if some_condition: + print("other 
condition") + else: + print("sync branch") + - description: "negated conditionals" + before: | + if not CrossSync.is_async: + print("sync code") + else: + print("async code") + + transformers: [StripAsyncConditionalBranches] + after: | + print("sync code") + - description: "is check" + before: | + if CrossSync.is_async is True: + print("async code") + else: + print("sync code") + + transformers: [StripAsyncConditionalBranches] + after: | + print("sync code") diff --git a/tests/system/cross_sync/test_cases/symbol_replacer.yaml b/tests/system/cross_sync/test_cases/symbol_replacer.yaml new file mode 100644 index 000000000..fa50045f8 --- /dev/null +++ b/tests/system/cross_sync/test_cases/symbol_replacer.yaml @@ -0,0 +1,82 @@ +tests: + - description: "Does not Replace function name" + before: | + def function(): + pass + transformers: + - name: SymbolReplacer + args: + replacements: {"function": "new_function"} + after: | + def function(): + pass + + - description: "Does not replace async function name" + before: | + async def async_func(): + await old_coroutine() + transformers: + - name: SymbolReplacer + args: + replacements: {"async_func": "new_async_func", "old_coroutine": "new_coroutine"} + after: | + async def async_func(): + await new_coroutine() + + - description: "Replace method call" + before: | + result = obj.old_method() + transformers: + - name: SymbolReplacer + args: + replacements: {"old_method": "new_method"} + after: | + result = obj.new_method() + + - description: "Replace in docstring" + before: | + def func(): + """This is a docstring mentioning old_name.""" + pass + transformers: + - name: SymbolReplacer + args: + replacements: {"old_name": "new_name"} + after: | + def func(): + """This is a docstring mentioning new_name.""" + pass + + - description: "Replace in type annotation" + before: | + def func(param: OldType) -> OldReturnType: + pass + transformers: + - name: SymbolReplacer + args: + replacements: {"OldType": "NewType", "OldReturnType": "NewReturnType"} + after: | + def func(param: NewType) -> NewReturnType: + pass + + - description: "Replace in nested attribute" + before: | + result = obj.attr1.attr2.old_attr + transformers: + - name: SymbolReplacer + args: + replacements: {"old_attr": "new_attr"} + after: | + result = obj.attr1.attr2.new_attr + + - description: "No replacement when symbol not found" + before: | + def unchanged_function(): + pass + transformers: + - name: SymbolReplacer + args: + replacements: {"non_existent": "replacement"} + after: | + def unchanged_function(): + pass diff --git a/tests/system/cross_sync/test_cross_sync_e2e.py b/tests/system/cross_sync/test_cross_sync_e2e.py new file mode 100644 index 000000000..86911b163 --- /dev/null +++ b/tests/system/cross_sync/test_cross_sync_e2e.py @@ -0,0 +1,65 @@ +import ast +import sys +import os +import black +import pytest +import yaml + +# add cross_sync to path +test_dir_name = os.path.dirname(__file__) +cross_sync_path = os.path.join(test_dir_name, "..", "..", "..", ".cross_sync") +sys.path.append(cross_sync_path) + +from transformers import ( # noqa: F401 E402 + SymbolReplacer, + AsyncToSync, + RmAioFunctions, + StripAsyncConditionalBranches, + CrossSyncFileProcessor, +) + + +def loader(): + dir_name = os.path.join(test_dir_name, "test_cases") + for file_name in os.listdir(dir_name): + if not file_name.endswith(".yaml"): + print(f"Skipping {file_name}") + continue + test_case_file = os.path.join(dir_name, file_name) + # load test cases + with open(test_case_file) as f: + print(f"Loading test cases 
from {test_case_file}") + test_cases = yaml.safe_load(f) + for test in test_cases["tests"]: + test["file_name"] = file_name + yield test + + +@pytest.mark.parametrize( + "test_dict", loader(), ids=lambda x: f"{x['file_name']}: {x.get('description', '')}" +) +@pytest.mark.skipif( + sys.version_info < (3, 9), reason="ast.unparse requires python3.9 or higher" +) +def test_e2e_scenario(test_dict): + before_ast = ast.parse(test_dict["before"]) + got_ast = before_ast + for transformer_info in test_dict["transformers"]: + # transformer can be passed as a string, or a dict with name and args + if isinstance(transformer_info, str): + transformer_class = globals()[transformer_info] + transformer_args = {} + else: + transformer_class = globals()[transformer_info["name"]] + transformer_args = transformer_info.get("args", {}) + transformer = transformer_class(**transformer_args) + got_ast = transformer.visit(got_ast) + if got_ast is None: + final_str = "" + else: + final_str = black.format_str(ast.unparse(got_ast), mode=black.FileMode()) + if test_dict.get("after") is None: + expected_str = "" + else: + expected_str = black.format_str(test_dict["after"], mode=black.FileMode()) + assert final_str == expected_str, f"Expected:\n{expected_str}\nGot:\n{final_str}" diff --git a/tests/system/data/__init__.py b/tests/system/data/__init__.py new file mode 100644 index 000000000..2b35cea8f --- /dev/null +++ b/tests/system/data/__init__.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +TEST_FAMILY = "test-family" +TEST_FAMILY_2 = "test-family-2" +TEST_AGGREGATE_FAMILY = "test-aggregate-family" diff --git a/tests/system/data/setup_fixtures.py b/tests/system/data/setup_fixtures.py new file mode 100644 index 000000000..169e2396b --- /dev/null +++ b/tests/system/data/setup_fixtures.py @@ -0,0 +1,210 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Contains a set of pytest fixtures for setting up and populating a +Bigtable database for testing purposes. +""" + +import pytest +import os +import uuid + +from . 
import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY
+
+# authorized view subset to allow all qualifiers
+ALLOW_ALL = ""
+ALL_QUALIFIERS = {"qualifier_prefixes": [ALLOW_ALL]}
+
+
+@pytest.fixture(scope="session")
+def admin_client():
+    """
+    Client for interacting with Table and Instance admin APIs
+    """
+    from google.cloud.bigtable.client import Client
+
+    client = Client(admin=True)
+    yield client
+
+
+@pytest.fixture(scope="session")
+def instance_id(admin_client, project_id, cluster_config):
+    """
+    Returns BIGTABLE_TEST_INSTANCE if set, otherwise creates a new temporary instance for the test session
+    """
+    from google.cloud.bigtable_admin_v2 import types
+    from google.api_core import exceptions
+    from google.cloud.environment_vars import BIGTABLE_EMULATOR
+
+    # use user-specified instance if available
+    user_specified_instance = os.getenv("BIGTABLE_TEST_INSTANCE")
+    if user_specified_instance:
+        print("Using user-specified instance: {}".format(user_specified_instance))
+        yield user_specified_instance
+        return
+
+    # create a new temporary test instance
+    instance_id = f"python-bigtable-tests-{uuid.uuid4().hex[:6]}"
+    if os.getenv(BIGTABLE_EMULATOR):
+        # don't create instance if in emulator mode
+        yield instance_id
+    else:
+        try:
+            operation = admin_client.instance_admin_client.create_instance(
+                parent=f"projects/{project_id}",
+                instance_id=instance_id,
+                instance=types.Instance(
+                    display_name="Test Instance",
+                    # labels={"python-system-test": "true"},
+                ),
+                clusters=cluster_config,
+            )
+            operation.result(timeout=240)
+        except exceptions.AlreadyExists:
+            pass
+        yield instance_id
+        admin_client.instance_admin_client.delete_instance(
+            name=f"projects/{project_id}/instances/{instance_id}"
+        )
+
+
+@pytest.fixture(scope="session")
+def column_split_config():
+    """
+    specify initial splits to create when creating a new test table
+    """
+    return [(num * 1000).to_bytes(8, "big") for num in range(1, 10)]
+
+
+@pytest.fixture(scope="session")
+def table_id(
+    admin_client,
+    project_id,
+    instance_id,
+    column_family_config,
+    init_table_id,
+    column_split_config,
+):
+    """
+    Returns BIGTABLE_TEST_TABLE if set, otherwise creates a new temporary table for the test session
+
+    Args:
+        - admin_client: Client for interacting with the Table Admin API. Supplied by the admin_client fixture.
+        - project_id: The project ID of the GCP project to test against. Supplied by the project_id fixture.
+        - instance_id: The ID of the Bigtable instance to test against. Supplied by the instance_id fixture.
+        - column_family_config: The column families to initialize the table with, if a pre-initialized table is not given with BIGTABLE_TEST_TABLE.
+            Supplied by the column_family_config fixture.
+        - init_table_id: The table ID to give to the test table, if a pre-initialized table is not given with BIGTABLE_TEST_TABLE.
+            Supplied by the init_table_id fixture.
+        - column_split_config: A list of row keys to use as initial splits when creating the test table. 
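+            Supplied by the column_split_config fixture.
+
+    Note: if the BIGTABLE_TEST_TABLE environment variable is set, that table id
+    is yielded as-is and no temporary table is created or deleted.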
+    """
+    from google.api_core import exceptions
+    from google.api_core import retry
+
+    # use user-specified table if available
+    user_specified_table = os.getenv("BIGTABLE_TEST_TABLE")
+    if user_specified_table:
+        print("Using user-specified table: {}".format(user_specified_table))
+        yield user_specified_table
+        return
+
+    retry = retry.Retry(
+        predicate=retry.if_exception_type(exceptions.FailedPrecondition)
+    )
+    try:
+        parent_path = f"projects/{project_id}/instances/{instance_id}"
+        print(f"Creating table: {parent_path}/tables/{init_table_id}")
+        admin_client.table_admin_client.create_table(
+            request={
+                "parent": parent_path,
+                "table_id": init_table_id,
+                "table": {"column_families": column_family_config},
+                "initial_splits": [{"key": key} for key in column_split_config],
+            },
+            retry=retry,
+        )
+    except exceptions.AlreadyExists:
+        pass
+    yield init_table_id
+    print(f"Deleting table: {parent_path}/tables/{init_table_id}")
+    try:
+        admin_client.table_admin_client.delete_table(
+            name=f"{parent_path}/tables/{init_table_id}"
+        )
+    except exceptions.NotFound:
+        print(f"Table {init_table_id} not found, skipping deletion")
+
+
+@pytest.fixture(scope="session")
+def authorized_view_id(
+    admin_client,
+    project_id,
+    instance_id,
+    table_id,
+):
+    """
+    Creates and returns a new temporary authorized view for the test session
+
+    Args:
+        - admin_client: Client for interacting with the Table Admin API. Supplied by the admin_client fixture.
+        - project_id: The project ID of the GCP project to test against. Supplied by the project_id fixture.
+        - instance_id: The ID of the Bigtable instance to test against. Supplied by the instance_id fixture.
+        - table_id: The ID of the table to create the authorized view for. Supplied by the table_id fixture.
+    """
+    from google.api_core import exceptions
+    from google.api_core import retry
+
+    retry = retry.Retry(
+        predicate=retry.if_exception_type(exceptions.FailedPrecondition)
+    )
+    new_view_id = uuid.uuid4().hex[:8]
+    parent_path = f"projects/{project_id}/instances/{instance_id}/tables/{table_id}"
+    new_path = f"{parent_path}/authorizedViews/{new_view_id}"
+    try:
+        print(f"Creating view: {new_path}")
+        admin_client.table_admin_client.create_authorized_view(
+            request={
+                "parent": parent_path,
+                "authorized_view_id": new_view_id,
+                "authorized_view": {
+                    "subset_view": {
+                        "row_prefixes": [ALLOW_ALL],
+                        "family_subsets": {
+                            TEST_FAMILY: ALL_QUALIFIERS,
+                            TEST_FAMILY_2: ALL_QUALIFIERS,
+                            TEST_AGGREGATE_FAMILY: ALL_QUALIFIERS,
+                        },
+                    },
+                },
+            },
+            retry=retry,
+        )
+    except exceptions.AlreadyExists:
+        pass
+    except exceptions.MethodNotImplemented:
+        # occurs when running against the emulator; pass an empty id
+        new_view_id = None
+    yield new_view_id
+    if new_view_id:
+        print(f"Deleting view: {new_path}")
+        try:
+            admin_client.table_admin_client.delete_authorized_view(name=new_path)
+        except exceptions.NotFound:
+            print(f"View {new_view_id} not found, skipping deletion")
+
+
+@pytest.fixture(scope="session")
+def project_id(client):
+    """Returns the project ID from the client."""
+    yield client.project
diff --git a/tests/system/data/test_system_async.py b/tests/system/data/test_system_async.py
new file mode 100644
index 000000000..ac8a358a3
--- /dev/null
+++ b/tests/system/data/test_system_async.py
@@ -0,0 +1,1357 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import datetime +import uuid +import os +from google.api_core import retry +from google.api_core.exceptions import ClientError, PermissionDenied + +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.type import date_pb2 + +from google.cloud.bigtable.data._cross_sync import CrossSync + +from . import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY + +if CrossSync.is_async: + from google.cloud.bigtable_v2.services.bigtable.transports.grpc_asyncio import ( + _LoggingClientAIOInterceptor as GapicInterceptor, + ) +else: + from google.cloud.bigtable_v2.services.bigtable.transports.grpc import ( + _LoggingClientInterceptor as GapicInterceptor, + ) + +__CROSS_SYNC_OUTPUT__ = "tests.system.data.test_system_autogen" + + +TARGETS = ["table"] +if not os.environ.get(BIGTABLE_EMULATOR): + # emulator doesn't support authorized views + TARGETS.append("authorized_view") + + +@CrossSync.convert_class( + sync_name="TempRowBuilder", + add_mapping_for_name="TempRowBuilder", +) +class TempRowBuilderAsync: + """ + Used to add rows to a table for testing purposes. + """ + + def __init__(self, target): + self.rows = [] + self.target = target + + @CrossSync.convert + async def add_row( + self, row_key, *, family=TEST_FAMILY, qualifier=b"q", value=b"test-value" + ): + if isinstance(value, str): + value = value.encode("utf-8") + elif isinstance(value, int): + value = value.to_bytes(8, byteorder="big", signed=True) + request = { + "table_name": self.target.table_name, + "row_key": row_key, + "mutations": [ + { + "set_cell": { + "family_name": family, + "column_qualifier": qualifier, + "value": value, + } + } + ], + } + await self.target.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + + @CrossSync.convert + async def add_aggregate_row( + self, row_key, *, family=TEST_AGGREGATE_FAMILY, qualifier=b"q", input=0 + ): + request = { + "table_name": self.target.table_name, + "row_key": row_key, + "mutations": [ + { + "add_to_cell": { + "family_name": family, + "column_qualifier": {"raw_value": qualifier}, + "timestamp": {"raw_timestamp_micros": 0}, + "input": {"int_value": input}, + } + } + ], + } + await self.target.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + + @CrossSync.convert + async def delete_rows(self): + if self.rows: + request = { + "table_name": self.target.table_name, + "entries": [ + {"row_key": row, "mutations": [{"delete_from_row": {}}]} + for row in self.rows + ], + } + await self.target.client._gapic_client.mutate_rows(request) + + +@CrossSync.convert_class(sync_name="TestSystem") +class TestSystemAsync: + def _make_client(self): + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + return CrossSync.DataClient(project=project) + + @CrossSync.convert + @CrossSync.pytest_fixture(scope="session") + async def client(self): + async with self._make_client() as client: + yield client + + @CrossSync.convert + @CrossSync.pytest_fixture(scope="session", 
params=TARGETS) + async def target(self, client, table_id, authorized_view_id, instance_id, request): + """ + This fixture runs twice: once for a standard table, and once with an authorized view + + Note: emulator doesn't support authorized views. Only use target + """ + if request.param == "table": + async with client.get_table(instance_id, table_id) as table: + yield table + elif request.param == "authorized_view": + async with client.get_authorized_view( + instance_id, table_id, authorized_view_id + ) as view: + yield view + else: + raise ValueError(f"unknown target type: {request.param}") + + @pytest.fixture(scope="session") + def column_family_config(self): + """ + specify column families to create when creating a new test table + """ + from google.cloud.bigtable_admin_v2 import types + + int_aggregate_type = types.Type.Aggregate( + input_type=types.Type(int64_type={"encoding": {"big_endian_bytes": {}}}), + sum={}, + ) + return { + TEST_FAMILY: types.ColumnFamily(), + TEST_FAMILY_2: types.ColumnFamily(), + TEST_AGGREGATE_FAMILY: types.ColumnFamily( + value_type=types.Type(aggregate_type=int_aggregate_type) + ), + } + + @pytest.fixture(scope="session") + def init_table_id(self): + """ + The table_id to use when creating a new test table + """ + return f"test-table-{uuid.uuid4().hex}" + + @pytest.fixture(scope="session") + def cluster_config(self, project_id): + """ + Configuration for the clusters to use when creating a new instance + """ + from google.cloud.bigtable_admin_v2 import types + + cluster = { + "test-cluster": types.Cluster( + location=f"projects/{project_id}/locations/us-central1-b", + serve_nodes=1, + ) + } + return cluster + + @CrossSync.convert + @pytest.mark.usefixtures("target") + async def _retrieve_cell_value(self, target, row_key): + """ + Helper to read an individual row + """ + from google.cloud.bigtable.data import ReadRowsQuery + + row_list = await target.read_rows(ReadRowsQuery(row_keys=row_key)) + assert len(row_list) == 1 + row = row_list[0] + cell = row.cells[0] + return cell.value + + @CrossSync.convert + async def _create_row_and_mutation( + self, table, temp_rows, *, start_value=b"start", new_value=b"new_value" + ): + """ + Helper to create a new row, and a sample set_cell mutation to change its value + """ + from google.cloud.bigtable.data.mutations import SetCell + + row_key = uuid.uuid4().hex.encode() + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row( + row_key, family=family, qualifier=qualifier, value=start_value + ) + # ensure cell is initialized + assert await self._retrieve_cell_value(table, row_key) == start_value + + mutation = SetCell(family=TEST_FAMILY, qualifier=qualifier, new_value=new_value) + return row_key, mutation + + @CrossSync.convert + @CrossSync.pytest_fixture(scope="function") + async def temp_rows(self, target): + builder = CrossSync.TempRowBuilder(target) + yield builder + await builder.delete_rows() + + @pytest.mark.usefixtures("target") + @pytest.mark.usefixtures("client") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=10 + ) + @CrossSync.pytest + async def test_ping_and_warm_gapic(self, client, target): + """ + Simple ping rpc test + This test ensures channels are able to authenticate with backend + """ + request = {"name": target.instance_name} + await client._gapic_client.ping_and_warm(request) + + @pytest.mark.usefixtures("target") + @pytest.mark.usefixtures("client") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, 
maximum=5 + ) + @CrossSync.pytest + async def test_ping_and_warm(self, client, target): + """ + Test ping and warm from handwritten client + """ + results = await client._ping_and_warm_instances() + assert len(results) == 1 + assert results[0] is None + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator mode doesn't refresh channel", + ) + @CrossSync.pytest + async def test_channel_refresh(self, table_id, instance_id, temp_rows): + """ + perform requests while swapping out the grpc channel. Requests should continue without error + """ + import time + + await temp_rows.add_row(b"test_row") + async with self._make_client() as client: + client._channel_refresh_task.cancel() + channel_wrapper = client.transport.grpc_channel + first_channel = channel_wrapper._channel + # swap channels frequently, with large grace windows + client._channel_refresh_task = CrossSync.create_task( + client._manage_channel, + refresh_interval_min=0.1, + refresh_interval_max=0.1, + grace_period=1, + sync_executor=client._executor, + ) + + # hit channels with frequent requests + end_time = time.monotonic() + 3 + async with client.get_table(instance_id, table_id) as table: + while time.monotonic() < end_time: + # we expect a CancelledError if a channel is closed before completion + rows = await table.read_rows({}) + assert len(rows) == 1 + await CrossSync.yield_to_event_loop() + # ensure channel was updated + updated_channel = channel_wrapper._channel + assert updated_channel is not first_channel + # ensure interceptors are kept (gapic's logging interceptor, and metric interceptor) + if CrossSync.is_async: + unary_interceptors = updated_channel._unary_unary_interceptors + assert len(unary_interceptors) == 2 + assert GapicInterceptor in [type(i) for i in unary_interceptors] + assert client._metrics_interceptor in unary_interceptors + stream_interceptors = updated_channel._unary_stream_interceptors + assert len(stream_interceptors) == 1 + assert client._metrics_interceptor in stream_interceptors + else: + assert isinstance( + client.transport._logged_channel._interceptor, GapicInterceptor + ) + assert updated_channel._interceptor == client._metrics_interceptor + + @CrossSync.pytest + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_mutation_set_cell(self, target, temp_rows): + """ + Ensure cells can be set properly + """ + row_key = b"bulk_mutate" + new_value = uuid.uuid4().hex.encode() + row_key, mutation = await self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + await target.mutate_row(row_key, mutation) + + # ensure cell is updated + assert (await self._retrieve_cell_value(target, row_key)) == new_value + + @CrossSync.pytest + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_mutation_add_to_cell(self, target, temp_rows): + """ + Test add to cell mutation + """ + from google.cloud.bigtable.data.mutations import AddToCell + + row_key = b"add_to_cell" + family = TEST_AGGREGATE_FAMILY + qualifier = b"test-qualifier" + # add row to temp_rows, for future deletion + await temp_rows.add_aggregate_row(row_key, family=family, qualifier=qualifier) + # set and check cell value + await target.mutate_row( + row_key, AddToCell(family, qualifier, 1, timestamp_micros=0) + ) + encoded_result = await self._retrieve_cell_value(target, row_key) + int_result = 
int.from_bytes(encoded_result, byteorder="big")
+        assert int_result == 1
+        # update again
+        await target.mutate_row(
+            row_key, AddToCell(family, qualifier, 9, timestamp_micros=0)
+        )
+        encoded_result = await self._retrieve_cell_value(target, row_key)
+        int_result = int.from_bytes(encoded_result, byteorder="big")
+        assert int_result == 10
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits"
+    )
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("target")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_sample_row_keys(
+        self, client, target, temp_rows, column_split_config
+    ):
+        """
+        Sample keys should return one sample per configured split, plus a final empty-key sample
+        """
+        await temp_rows.add_row(b"row_key_1")
+        await temp_rows.add_row(b"row_key_2")
+
+        results = await target.sample_row_keys()
+        assert len(results) == len(column_split_config) + 1
+        # first keys should match the split config
+        for idx in range(len(column_split_config)):
+            assert results[idx][0] == column_split_config[idx]
+            assert isinstance(results[idx][1], int)
+        # last sample should be empty key
+        assert results[-1][0] == b""
+        assert isinstance(results[-1][1], int)
+
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("target")
+    @CrossSync.pytest
+    async def test_bulk_mutations_set_cell(self, client, target, temp_rows):
+        """
+        Ensure cells can be set properly
+        """
+        from google.cloud.bigtable.data.mutations import RowMutationEntry
+
+        new_value = uuid.uuid4().hex.encode()
+        row_key, mutation = await self._create_row_and_mutation(
+            target, temp_rows, new_value=new_value
+        )
+        bulk_mutation = RowMutationEntry(row_key, [mutation])
+
+        await target.bulk_mutate_rows([bulk_mutation])
+
+        # ensure cell is updated
+        assert (await self._retrieve_cell_value(target, row_key)) == new_value
+
+    @CrossSync.pytest
+    async def test_bulk_mutations_raise_exception(self, client, target):
+        """
+        If an invalid mutation is passed, an exception should be raised
+        """
+        from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell
+        from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+        from google.cloud.bigtable.data.exceptions import FailedMutationEntryError
+
+        row_key = uuid.uuid4().hex.encode()
+        mutation = SetCell(
+            family="nonexistent", qualifier=b"test-qualifier", new_value=b""
+        )
+        bulk_mutation = RowMutationEntry(row_key, [mutation])
+
+        with pytest.raises(MutationsExceptionGroup) as exc:
+            await target.bulk_mutate_rows([bulk_mutation])
+        assert len(exc.value.exceptions) == 1
+        entry_error = exc.value.exceptions[0]
+        assert isinstance(entry_error, FailedMutationEntryError)
+        assert entry_error.index == 0
+        assert entry_error.entry == bulk_mutation
+
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("target")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_mutations_batcher_context_manager(self, client, target, temp_rows):
+        """
+        test batcher with context manager. 
Should flush on exit + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + row_key2, mutation2 = await self._create_row_and_mutation( + target, temp_rows, new_value=new_value2 + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + async with target.mutations_batcher() as batcher: + await batcher.append(bulk_mutation) + await batcher.append(bulk_mutation2) + # ensure cell is updated + assert (await self._retrieve_cell_value(target, row_key)) == new_value + assert len(batcher._staged_entries) == 0 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_mutations_batcher_timer_flush(self, client, target, temp_rows): + """ + batch should occur after flush_interval seconds + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + row_key, mutation = await self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + flush_interval = 0.1 + async with target.mutations_batcher(flush_interval=flush_interval) as batcher: + await batcher.append(bulk_mutation) + await CrossSync.yield_to_event_loop() + assert len(batcher._staged_entries) == 1 + await CrossSync.sleep(flush_interval + 0.1) + assert len(batcher._staged_entries) == 0 + # ensure cell is updated + assert (await self._retrieve_cell_value(target, row_key)) == new_value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_mutations_batcher_count_flush(self, client, target, temp_rows): + """ + batch should flush after flush_limit_mutation_count mutations + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await self._create_row_and_mutation( + target, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + async with target.mutations_batcher(flush_limit_mutation_count=2) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + # should be noop; flush not scheduled + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # task should now be scheduled + assert len(batcher._flush_jobs) == 1 + # let flush complete + for future in list(batcher._flush_jobs): + await future + # for sync version: grab result + future.result() + assert len(batcher._staged_entries) == 0 + assert len(batcher._flush_jobs) == 0 + # ensure cells were updated + assert (await self._retrieve_cell_value(target, row_key)) == new_value + assert (await self._retrieve_cell_value(target, row_key2)) == new_value2 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def 
test_mutations_batcher_bytes_flush(self, client, target, temp_rows): + """ + batch should flush after flush_limit_bytes bytes + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await self._create_row_and_mutation( + target, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + flush_limit = bulk_mutation.size() + bulk_mutation2.size() - 1 + + async with target.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # task should now be scheduled + assert len(batcher._flush_jobs) == 1 + assert len(batcher._staged_entries) == 0 + # let flush complete + for future in list(batcher._flush_jobs): + await future + # for sync version: grab result + future.result() + # ensure cells were updated + assert (await self._retrieve_cell_value(target, row_key)) == new_value + assert (await self._retrieve_cell_value(target, row_key2)) == new_value2 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync.pytest + async def test_mutations_batcher_no_flush(self, client, target, temp_rows): + """ + test with no flush requirements met + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + start_value = b"unchanged" + row_key, mutation = await self._create_row_and_mutation( + target, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await self._create_row_and_mutation( + target, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + size_limit = bulk_mutation.size() + bulk_mutation2.size() + 1 + async with target.mutations_batcher( + flush_limit_bytes=size_limit, flush_limit_mutation_count=3, flush_interval=1 + ) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # flush not scheduled + assert len(batcher._flush_jobs) == 0 + await CrossSync.yield_to_event_loop() + assert len(batcher._staged_entries) == 2 + assert len(batcher._flush_jobs) == 0 + # ensure cells were not updated + assert (await self._retrieve_cell_value(target, row_key)) == start_value + assert (await self._retrieve_cell_value(target, row_key2)) == start_value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_mutations_batcher_large_batch(self, client, target, temp_rows): + """ + test batcher with large batch of mutations + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell + + add_mutation = SetCell( + family=TEST_FAMILY, qualifier=b"test-qualifier", new_value=b"a" + ) + row_mutations = [] + for i in range(50_000): + row_key = uuid.uuid4().hex.encode() + row_mutations.append(RowMutationEntry(row_key, [add_mutation])) + # append row key for eventual deletion + temp_rows.rows.append(row_key) + + async with target.mutations_batcher() as batcher: + for 
mutation in row_mutations: + await batcher.append(mutation) + # ensure cell is updated + assert len(batcher._staged_entries) == 0 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @pytest.mark.parametrize( + "start,increment,expected", + [ + (0, 0, 0), + (0, 1, 1), + (0, -1, -1), + (1, 0, 1), + (0, -100, -100), + (0, 3000, 3000), + (10, 4, 14), + (_MAX_INCREMENT_VALUE, -_MAX_INCREMENT_VALUE, 0), + (_MAX_INCREMENT_VALUE, 2, -_MAX_INCREMENT_VALUE), + (-_MAX_INCREMENT_VALUE, -2, _MAX_INCREMENT_VALUE), + ], + ) + @CrossSync.pytest + async def test_read_modify_write_row_increment( + self, client, target, temp_rows, start, increment, expected + ): + """ + test read_modify_write_row + """ + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row( + row_key, value=start, family=family, qualifier=qualifier + ) + + rule = IncrementRule(family, qualifier, increment) + result = await target.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert int(result[0]) == expected + # ensure that reading from server gives same value + assert (await self._retrieve_cell_value(target, row_key)) == result[0].value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @pytest.mark.parametrize( + "start,append,expected", + [ + (b"", b"", b""), + ("", "", b""), + (b"abc", b"123", b"abc123"), + (b"abc", "123", b"abc123"), + ("", b"1", b"1"), + (b"abc", "", b"abc"), + (b"hello", b"world", b"helloworld"), + ], + ) + @CrossSync.pytest + async def test_read_modify_write_row_append( + self, client, target, temp_rows, start, append, expected + ): + """ + test read_modify_write_row + """ + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row( + row_key, value=start, family=family, qualifier=qualifier + ) + + rule = AppendValueRule(family, qualifier, append) + result = await target.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert result[0].value == expected + # ensure that reading from server gives same value + assert (await self._retrieve_cell_value(target, row_key)) == result[0].value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync.pytest + async def test_read_modify_write_row_chained(self, client, target, temp_rows): + """ + test read_modify_write_row with multiple rules + """ + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + start_amount = 1 + increment_amount = 10 + await temp_rows.add_row( + row_key, value=start_amount, family=family, qualifier=qualifier + ) + rule = [ + IncrementRule(family, qualifier, increment_amount), + AppendValueRule(family, qualifier, "hello"), + AppendValueRule(family, qualifier, "world"), + AppendValueRule(family, qualifier, "!"), + ] + result = await target.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert result[0].family == family + assert 
result[0].qualifier == qualifier
+        # result should be a bytes number string for the IncrementRules, followed by the AppendValueRule values
+        assert (
+            result[0].value
+            == (start_amount + increment_amount).to_bytes(8, "big", signed=True)
+            + b"helloworld!"
+        )
+        # ensure that reading from server gives same value
+        assert (await self._retrieve_cell_value(target, row_key)) == result[0].value
+
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("target")
+    @pytest.mark.parametrize(
+        "start_val,predicate_range,expected_result",
+        [
+            (1, (0, 2), True),
+            (-1, (0, 2), False),
+        ],
+    )
+    @CrossSync.pytest
+    async def test_check_and_mutate(
+        self, client, target, temp_rows, start_val, predicate_range, expected_result
+    ):
+        """
+        test that check_and_mutate_row applies the right mutations, and returns the right result
+        """
+        from google.cloud.bigtable.data.mutations import SetCell
+        from google.cloud.bigtable.data.row_filters import ValueRangeFilter
+
+        row_key = b"test-row-key"
+        family = TEST_FAMILY
+        qualifier = b"test-qualifier"
+
+        await temp_rows.add_row(
+            row_key, value=start_val, family=family, qualifier=qualifier
+        )
+
+        false_mutation_value = b"false-mutation-value"
+        false_mutation = SetCell(
+            family=TEST_FAMILY, qualifier=qualifier, new_value=false_mutation_value
+        )
+        true_mutation_value = b"true-mutation-value"
+        true_mutation = SetCell(
+            family=TEST_FAMILY, qualifier=qualifier, new_value=true_mutation_value
+        )
+        predicate = ValueRangeFilter(predicate_range[0], predicate_range[1])
+        result = await target.check_and_mutate_row(
+            row_key,
+            predicate,
+            true_case_mutations=true_mutation,
+            false_case_mutations=false_mutation,
+        )
+        assert result == expected_result
+        # ensure cell is updated
+        expected_value = (
+            true_mutation_value if expected_result else false_mutation_value
+        )
+        assert (await self._retrieve_cell_value(target, row_key)) == expected_value
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator doesn't raise InvalidArgument",
+    )
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("target")
+    @CrossSync.pytest
+    async def test_check_and_mutate_empty_request(self, client, target):
+        """
+        check_and_mutate with no true or false mutations should raise an error
+        """
+        from google.api_core import exceptions
+
+        with pytest.raises(exceptions.InvalidArgument) as e:
+            await target.check_and_mutate_row(
+                b"row_key", None, true_case_mutations=None, false_case_mutations=None
+            )
+        assert "No mutations provided" in str(e.value)
+
+    @pytest.mark.usefixtures("target")
+    @CrossSync.convert(replace_symbols={"__anext__": "__next__"})
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_read_rows_stream(self, target, temp_rows):
+        """
+        Ensure that the read_rows_stream method works
+        """
+        await temp_rows.add_row(b"row_key_1")
+        await temp_rows.add_row(b"row_key_2")
+
+        # full table scan
+        generator = await target.read_rows_stream({})
+        first_row = await generator.__anext__()
+        second_row = await generator.__anext__()
+        assert first_row.row_key == b"row_key_1"
+        assert second_row.row_key == b"row_key_2"
+        with pytest.raises(CrossSync.StopIteration):
+            await generator.__anext__()
+
+    @pytest.mark.usefixtures("target")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_read_rows(self, target, temp_rows):
+        """
+        Ensure that the read_rows method works
+        """
+        
await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + # full table scan + row_list = await target.read_rows({}) + assert len(row_list) == 2 + assert row_list[0].row_key == b"row_key_1" + assert row_list[1].row_key == b"row_key_2" + + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_sharded_simple(self, target, temp_rows): + """ + Test read rows sharded with two queries + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"]) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"]) + row_list = await target.read_rows_sharded([query1, query2]) + assert len(row_list) == 4 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"c" + assert row_list[2].row_key == b"b" + assert row_list[3].row_key == b"d" + + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_sharded_from_sample(self, target, temp_rows): + """ + Test end-to-end sharding + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import RowRange + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + + table_shard_keys = await target.sample_row_keys() + query = ReadRowsQuery(row_ranges=[RowRange(start_key=b"b", end_key=b"z")]) + shard_queries = query.shard(table_shard_keys) + row_list = await target.read_rows_sharded(shard_queries) + assert len(row_list) == 3 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + assert row_list[2].row_key == b"d" + + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_sharded_filters_limits(self, target, temp_rows): + """ + Test read rows sharded with filters and limits + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + + label_filter1 = ApplyLabelFilter("first") + label_filter2 = ApplyLabelFilter("second") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"], limit=1, row_filter=label_filter1) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"], row_filter=label_filter2) + row_list = await target.read_rows_sharded([query1, query2]) + assert len(row_list) == 3 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"b" + assert row_list[2].row_key == b"d" + assert row_list[0][0].labels == ["first"] + assert row_list[1][0].labels == ["second"] + assert row_list[2][0].labels == ["second"] + + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_range_query(self, target, temp_rows): + """ + Ensure that the read_rows method works + """ + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + await temp_rows.add_row(b"a") + await 
temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # full table scan + query = ReadRowsQuery(row_ranges=RowRange(start_key=b"b", end_key=b"d")) + row_list = await target.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_single_key_query(self, target, temp_rows): + """ + Ensure that the read_rows method works with specified query + """ + from google.cloud.bigtable.data import ReadRowsQuery + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # retrieve specific keys + query = ReadRowsQuery(row_keys=[b"a", b"c"]) + row_list = await target.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"c" + + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_with_filter(self, target, temp_rows): + """ + ensure filters are applied + """ + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # retrieve keys with filter + expected_label = "test-label" + row_filter = ApplyLabelFilter(expected_label) + query = ReadRowsQuery(row_filter=row_filter) + row_list = await target.read_rows(query) + assert len(row_list) == 4 + for row in row_list: + assert row[0].labels == [expected_label] + + @pytest.mark.usefixtures("target") + @CrossSync.convert(replace_symbols={"__anext__": "__next__", "aclose": "close"}) + @CrossSync.pytest + async def test_read_rows_stream_close(self, target, temp_rows): + """ + Ensure that the read_rows_stream can be closed + """ + from google.cloud.bigtable.data import ReadRowsQuery + + await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + # full table scan + query = ReadRowsQuery() + generator = await target.read_rows_stream(query) + # grab first row + first_row = await generator.__anext__() + assert first_row.row_key == b"row_key_1" + # close stream early + await generator.aclose() + with pytest.raises(CrossSync.StopIteration): + await generator.__anext__() + + @pytest.mark.usefixtures("target") + @CrossSync.pytest + async def test_read_row(self, target, temp_rows): + """ + Test read_row (single row helper) + """ + from google.cloud.bigtable.data import Row + + await temp_rows.add_row(b"row_key_1", value=b"value") + row = await target.read_row(b"row_key_1") + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", + ) + @pytest.mark.usefixtures("target") + @CrossSync.pytest + async def test_read_row_missing(self, target): + """ + Test read_row when row does not exist + """ + from google.api_core import exceptions + + row_key = "row_key_not_exist" + result = await target.read_row(row_key) + assert result is None + with pytest.raises(exceptions.InvalidArgument) as e: + await target.read_row("") + assert "Row keys must be non-empty" in str(e) + + 
@pytest.mark.usefixtures("target")
+    @CrossSync.pytest
+    async def test_read_row_w_filter(self, target, temp_rows):
+        """
+        Test read_row (single row helper)
+        """
+        from google.cloud.bigtable.data import Row
+        from google.cloud.bigtable.data.row_filters import ApplyLabelFilter
+
+        await temp_rows.add_row(b"row_key_1", value=b"value")
+        expected_label = "test-label"
+        label_filter = ApplyLabelFilter(expected_label)
+        row = await target.read_row(b"row_key_1", row_filter=label_filter)
+        assert isinstance(row, Row)
+        assert row.row_key == b"row_key_1"
+        assert row.cells[0].value == b"value"
+        assert row.cells[0].labels == [expected_label]
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator doesn't raise InvalidArgument",
+    )
+    @pytest.mark.usefixtures("target")
+    @CrossSync.pytest
+    async def test_row_exists(self, target, temp_rows):
+        """Test row_exists with rows that exist and don't exist"""
+        from google.api_core import exceptions
+
+        assert await target.row_exists(b"row_key_1") is False
+        await temp_rows.add_row(b"row_key_1")
+        assert await target.row_exists(b"row_key_1") is True
+        assert await target.row_exists("row_key_1") is True
+        assert await target.row_exists(b"row_key_2") is False
+        assert await target.row_exists("row_key_2") is False
+        assert await target.row_exists("3") is False
+        await temp_rows.add_row(b"3")
+        assert await target.row_exists(b"3") is True
+        with pytest.raises(exceptions.InvalidArgument) as e:
+            await target.row_exists("")
+        assert "Row keys must be non-empty" in str(e)
+
+    @pytest.mark.usefixtures("target")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @pytest.mark.parametrize(
+        "cell_value,filter_input,expect_match",
+        [
+            (b"abc", b"abc", True),
+            (b"abc", "abc", True),
+            (b".", ".", True),
+            (".*", ".*", True),
+            (".*", b".*", True),
+            ("a", ".*", False),
+            (b".*", b".*", True),
+            (r"\a", r"\a", True),
+            (b"\xe2\x98\x83", "☃", True),
+            ("☃", "☃", True),
+            (r"\C☃", r"\C☃", True),
+            (1, 1, True),
+            (2, 1, False),
+            (68, 68, True),
+            ("D", 68, False),
+            (68, "D", False),
+            (-1, -1, True),
+            (2852126720, 2852126720, True),
+            (-1431655766, -1431655766, True),
+            (-1431655766, -1, False),
+        ],
+    )
+    @CrossSync.pytest
+    async def test_literal_value_filter(
+        self, target, temp_rows, cell_value, filter_input, expect_match
+    ):
+        """
+        Literal value filter does complex escaping on re2 strings. 
+        Make sure inputs are properly interpreted by the server
+        """
+        from google.cloud.bigtable.data.row_filters import LiteralValueFilter
+        from google.cloud.bigtable.data import ReadRowsQuery
+
+        f = LiteralValueFilter(filter_input)
+        await temp_rows.add_row(b"row_key_1", value=cell_value)
+        query = ReadRowsQuery(row_filter=f)
+        row_list = await target.read_rows(query)
+        assert len(row_list) == bool(
+            expect_match
+        ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter"
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator doesn't support authorized views",
+    )
+    @CrossSync.pytest
+    async def test_authorized_view_unauthenticated(
+        self, client, authorized_view_id, instance_id, table_id
+    ):
+        """
+        Requesting a family outside the authorized family_subset should raise an exception
+        """
+        from google.cloud.bigtable.data.mutations import SetCell
+
+        async with client.get_authorized_view(
+            instance_id, table_id, authorized_view_id
+        ) as view:
+            mutation = SetCell(family="unauthorized", qualifier="q", new_value="v")
+            with pytest.raises(PermissionDenied) as e:
+                await view.mutate_row(b"row-key", mutation)
+            assert "outside the Authorized View" in e.value.message
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator doesn't support SQL",
+    )
+    @pytest.mark.usefixtures("client")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_execute_query_simple(self, client, table_id, instance_id):
+        result = await client.execute_query("SELECT 1 AS a, 'foo' AS b", instance_id)
+        rows = [r async for r in result]
+        assert len(rows) == 1
+        row = rows[0]
+        assert row["a"] == 1
+        assert row["b"] == "foo"
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator doesn't support SQL",
+    )
+    @CrossSync.pytest
+    @pytest.mark.usefixtures("target")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    async def test_execute_against_target(
+        self, client, instance_id, table_id, temp_rows, column_family_config
+    ):
+        await temp_rows.add_row(b"row_key_1")
+        result = await client.execute_query(
+            "SELECT * FROM `" + table_id + "`", instance_id
+        )
+        rows = [r async for r in result]
+
+        assert len(rows) == 1
+        assert rows[0]["_key"] == b"row_key_1"
+        family_map = rows[0][TEST_FAMILY]
+        assert len(family_map) == 1
+        assert family_map[b"q"] == b"test-value"
+        assert len(rows[0][TEST_FAMILY_2]) == 0
+        md = result.metadata
+        # we expect it to fetch each column family, plus _key
+        # add additional families here if column_family_config changes
+        assert len(md) == len(column_family_config) + 1
+        assert md["_key"].column_type == SqlType.Bytes()
+        assert md[TEST_FAMILY].column_type == SqlType.Map(
+            SqlType.Bytes(), SqlType.Bytes()
+        )
+        assert md[TEST_FAMILY_2].column_type == SqlType.Map(
+            SqlType.Bytes(), SqlType.Bytes()
+        )
+        assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map(
+            SqlType.Bytes(), SqlType.Int64()
+        )
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator doesn't support SQL",
+    )
+    @CrossSync.pytest
+    @pytest.mark.usefixtures("client")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    async def test_execute_query_params(self, client, table_id, instance_id):
+        query = (
+            "SELECT @stringParam AS strCol, @bytesParam as bytesCol, @int64Param AS intCol, "
+            "@float32Param AS float32Col, 
@float64Param AS float64Col, @boolParam AS boolCol, " + "@tsParam AS tsCol, @dateParam AS dateCol, @byteArrayParam AS byteArrayCol, " + "@stringArrayParam AS stringArrayCol, @intArrayParam AS intArrayCol, " + "@float32ArrayParam AS float32ArrayCol, @float64ArrayParam AS float64ArrayCol, " + "@boolArrayParam AS boolArrayCol, @tsArrayParam AS tsArrayCol, " + "@dateArrayParam AS dateArrayCol" + ) + parameters = { + "stringParam": "foo", + "bytesParam": b"bar", + "int64Param": 12, + "float32Param": 1.1, + "float64Param": 1.2, + "boolParam": True, + "tsParam": datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + "dateParam": datetime.date(2025, 1, 16), + "byteArrayParam": [b"foo", b"bar", None], + "stringArrayParam": ["foo", "bar", None], + "intArrayParam": [1, None, 2], + "float32ArrayParam": [1.2, None, 1.3], + "float64ArrayParam": [1.4, None, 1.5], + "boolArrayParam": [None, False, True], + "tsArrayParam": [ + datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + datetime.datetime.fromtimestamp(2000, tz=datetime.timezone.utc), + None, + ], + "dateArrayParam": [ + datetime.date(2025, 1, 16), + datetime.date(2025, 1, 17), + None, + ], + } + param_types = { + "stringParam": SqlType.String(), + "bytesParam": SqlType.Bytes(), + "int64Param": SqlType.Int64(), + "float32Param": SqlType.Float32(), + "float64Param": SqlType.Float64(), + "boolParam": SqlType.Bool(), + "tsParam": SqlType.Timestamp(), + "dateParam": SqlType.Date(), + "byteArrayParam": SqlType.Array(SqlType.Bytes()), + "stringArrayParam": SqlType.Array(SqlType.String()), + "intArrayParam": SqlType.Array(SqlType.Int64()), + "float32ArrayParam": SqlType.Array(SqlType.Float32()), + "float64ArrayParam": SqlType.Array(SqlType.Float64()), + "boolArrayParam": SqlType.Array(SqlType.Bool()), + "tsArrayParam": SqlType.Array(SqlType.Timestamp()), + "dateArrayParam": SqlType.Array(SqlType.Date()), + } + + result = await client.execute_query( + query, instance_id, parameters=parameters, parameter_types=param_types + ) + rows = [r async for r in result] + assert len(rows) == 1 + row = rows[0] + assert row["strCol"] == parameters["stringParam"] + assert row["bytesCol"] == parameters["bytesParam"] + assert row["intCol"] == parameters["int64Param"] + assert row["float32Col"] == pytest.approx(parameters["float32Param"]) + assert row["float64Col"] == pytest.approx(parameters["float64Param"]) + assert row["boolCol"] == parameters["boolParam"] + assert row["tsCol"] == parameters["tsParam"] + assert row["dateCol"] == date_pb2.Date(year=2025, month=1, day=16) + assert row["stringArrayCol"] == parameters["stringArrayParam"] + assert row["byteArrayCol"] == parameters["byteArrayParam"] + assert row["intArrayCol"] == parameters["intArrayParam"] + assert row["float32ArrayCol"] == pytest.approx(parameters["float32ArrayParam"]) + assert row["float64ArrayCol"] == pytest.approx(parameters["float64ArrayParam"]) + assert row["boolArrayCol"] == parameters["boolArrayParam"] + assert row["tsArrayCol"] == parameters["tsArrayParam"] + assert row["dateArrayCol"] == [ + date_pb2.Date(year=2025, month=1, day=16), + date_pb2.Date(year=2025, month=1, day=17), + None, + ] + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't support SQL", + ) + @CrossSync.pytest + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_execute_metadata_on_empty_response( + self, client, instance_id, table_id, temp_rows, column_family_config + 
):
+        await temp_rows.add_row(b"row_key_1")
+        result = await client.execute_query(
+            "SELECT * FROM `" + table_id + "` WHERE _key='non-existent'", instance_id
+        )
+        rows = [r async for r in result]
+
+        assert len(rows) == 0
+        md = result.metadata
+        # we expect it to fetch each column family, plus _key
+        # add additional families here if column_family_config changes
+        assert len(md) == len(column_family_config) + 1
+        assert md["_key"].column_type == SqlType.Bytes()
+        assert md[TEST_FAMILY].column_type == SqlType.Map(
+            SqlType.Bytes(), SqlType.Bytes()
+        )
+        assert md[TEST_FAMILY_2].column_type == SqlType.Map(
+            SqlType.Bytes(), SqlType.Bytes()
+        )
+        assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map(
+            SqlType.Bytes(), SqlType.Int64()
+        )
diff --git a/tests/system/data/test_system_autogen.py b/tests/system/data/test_system_autogen.py
new file mode 100644
index 000000000..463235087
--- /dev/null
+++ b/tests/system/data/test_system_autogen.py
@@ -0,0 +1,1109 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This file is automatically generated by CrossSync. Do not edit manually.
+
+import pytest
+import datetime
+import uuid
+import os
+from google.api_core import retry
+from google.api_core.exceptions import ClientError, PermissionDenied
+from google.cloud.bigtable.data.execute_query.metadata import SqlType
+from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE
+from google.cloud.environment_vars import BIGTABLE_EMULATOR
+from google.type import date_pb2
+from google.cloud.bigtable.data._cross_sync import CrossSync
+from . import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY
+from google.cloud.bigtable_v2.services.bigtable.transports.grpc import (
+    _LoggingClientInterceptor as GapicInterceptor,
+)
+
+TARGETS = ["table"]
+if not os.environ.get(BIGTABLE_EMULATOR):
+    TARGETS.append("authorized_view")
+
+
+@CrossSync._Sync_Impl.add_mapping_decorator("TempRowBuilder")
+class TempRowBuilder:
+    """
+    Used to add rows to a table for testing purposes. 
+ """ + + def __init__(self, target): + self.rows = [] + self.target = target + + def add_row( + self, row_key, *, family=TEST_FAMILY, qualifier=b"q", value=b"test-value" + ): + if isinstance(value, str): + value = value.encode("utf-8") + elif isinstance(value, int): + value = value.to_bytes(8, byteorder="big", signed=True) + request = { + "table_name": self.target.table_name, + "row_key": row_key, + "mutations": [ + { + "set_cell": { + "family_name": family, + "column_qualifier": qualifier, + "value": value, + } + } + ], + } + self.target.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + + def add_aggregate_row( + self, row_key, *, family=TEST_AGGREGATE_FAMILY, qualifier=b"q", input=0 + ): + request = { + "table_name": self.target.table_name, + "row_key": row_key, + "mutations": [ + { + "add_to_cell": { + "family_name": family, + "column_qualifier": {"raw_value": qualifier}, + "timestamp": {"raw_timestamp_micros": 0}, + "input": {"int_value": input}, + } + } + ], + } + self.target.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + + def delete_rows(self): + if self.rows: + request = { + "table_name": self.target.table_name, + "entries": [ + {"row_key": row, "mutations": [{"delete_from_row": {}}]} + for row in self.rows + ], + } + self.target.client._gapic_client.mutate_rows(request) + + +class TestSystem: + def _make_client(self): + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + return CrossSync._Sync_Impl.DataClient(project=project) + + @pytest.fixture(scope="session") + def client(self): + with self._make_client() as client: + yield client + + @pytest.fixture(scope="session", params=TARGETS) + def target(self, client, table_id, authorized_view_id, instance_id, request): + """This fixture runs twice: once for a standard table, and once with an authorized view + + Note: emulator doesn't support authorized views. 
Only the table target is exercised there"""
+        if request.param == "table":
+            with client.get_table(instance_id, table_id) as table:
+                yield table
+        elif request.param == "authorized_view":
+            with client.get_authorized_view(
+                instance_id, table_id, authorized_view_id
+            ) as view:
+                yield view
+        else:
+            raise ValueError(f"unknown target type: {request.param}")
+
+    @pytest.fixture(scope="session")
+    def column_family_config(self):
+        """specify column families to create when creating a new test table"""
+        from google.cloud.bigtable_admin_v2 import types
+
+        int_aggregate_type = types.Type.Aggregate(
+            input_type=types.Type(int64_type={"encoding": {"big_endian_bytes": {}}}),
+            sum={},
+        )
+        return {
+            TEST_FAMILY: types.ColumnFamily(),
+            TEST_FAMILY_2: types.ColumnFamily(),
+            TEST_AGGREGATE_FAMILY: types.ColumnFamily(
+                value_type=types.Type(aggregate_type=int_aggregate_type)
+            ),
+        }
+
+    @pytest.fixture(scope="session")
+    def init_table_id(self):
+        """The table_id to use when creating a new test table"""
+        return f"test-table-{uuid.uuid4().hex}"
+
+    @pytest.fixture(scope="session")
+    def cluster_config(self, project_id):
+        """Configuration for the clusters to use when creating a new instance"""
+        from google.cloud.bigtable_admin_v2 import types
+
+        cluster = {
+            "test-cluster": types.Cluster(
+                location=f"projects/{project_id}/locations/us-central1-b", serve_nodes=1
+            )
+        }
+        return cluster
+
+    @pytest.mark.usefixtures("target")
+    def _retrieve_cell_value(self, target, row_key):
+        """Helper to read an individual row"""
+        from google.cloud.bigtable.data import ReadRowsQuery
+
+        row_list = target.read_rows(ReadRowsQuery(row_keys=row_key))
+        assert len(row_list) == 1
+        row = row_list[0]
+        cell = row.cells[0]
+        return cell.value
+
+    def _create_row_and_mutation(
+        self, table, temp_rows, *, start_value=b"start", new_value=b"new_value"
+    ):
+        """Helper to create a new row, and a sample set_cell mutation to change its value"""
+        from google.cloud.bigtable.data.mutations import SetCell
+
+        row_key = uuid.uuid4().hex.encode()
+        family = TEST_FAMILY
+        qualifier = b"test-qualifier"
+        temp_rows.add_row(
+            row_key, family=family, qualifier=qualifier, value=start_value
+        )
+        assert self._retrieve_cell_value(table, row_key) == start_value
+        mutation = SetCell(family=TEST_FAMILY, qualifier=qualifier, new_value=new_value)
+        return (row_key, mutation)
+
+    @pytest.fixture(scope="function")
+    def temp_rows(self, target):
+        builder = CrossSync._Sync_Impl.TempRowBuilder(target)
+        yield builder
+        builder.delete_rows()
+
+    @pytest.mark.usefixtures("target")
+    @pytest.mark.usefixtures("client")
+    @CrossSync._Sync_Impl.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=10
+    )
+    def test_ping_and_warm_gapic(self, client, target):
+        """Simple ping rpc test
+        This test ensures channels are able to authenticate with the backend"""
+        request = {"name": target.instance_name}
+        client._gapic_client.ping_and_warm(request)
+
+    @pytest.mark.usefixtures("target")
+    @pytest.mark.usefixtures("client")
+    @CrossSync._Sync_Impl.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    def test_ping_and_warm(self, client, target):
+        """Test ping and warm from handwritten client"""
+        results = client._ping_and_warm_instances()
+        assert len(results) == 1
+        assert results[0] is None
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator mode doesn't refresh channel",
+    )
+    def test_channel_refresh(self, table_id, instance_id, temp_rows):
+        """perform requests while swapping out the 
grpc channel. Requests should continue without error"""
+        import time
+
+        temp_rows.add_row(b"test_row")
+        with self._make_client() as client:
+            client._channel_refresh_task.cancel()
+            channel_wrapper = client.transport.grpc_channel
+            first_channel = channel_wrapper._channel
+            client._channel_refresh_task = CrossSync._Sync_Impl.create_task(
+                client._manage_channel,
+                refresh_interval_min=0.1,
+                refresh_interval_max=0.1,
+                grace_period=1,
+                sync_executor=client._executor,
+            )
+            end_time = time.monotonic() + 3
+            with client.get_table(instance_id, table_id) as table:
+                while time.monotonic() < end_time:
+                    rows = table.read_rows({})
+                    assert len(rows) == 1
+                    CrossSync._Sync_Impl.yield_to_event_loop()
+            updated_channel = channel_wrapper._channel
+            assert updated_channel is not first_channel
+            assert isinstance(
+                client.transport._logged_channel._interceptor, GapicInterceptor
+            )
+            assert updated_channel._interceptor == client._metrics_interceptor
+
+    @pytest.mark.usefixtures("target")
+    @CrossSync._Sync_Impl.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    def test_mutation_set_cell(self, target, temp_rows):
+        """Ensure cells can be set properly"""
+        row_key = b"bulk_mutate"
+        new_value = uuid.uuid4().hex.encode()
+        (row_key, mutation) = self._create_row_and_mutation(
+            target, temp_rows, new_value=new_value
+        )
+        target.mutate_row(row_key, mutation)
+        assert self._retrieve_cell_value(target, row_key) == new_value
+
+    @pytest.mark.usefixtures("target")
+    @CrossSync._Sync_Impl.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    def test_mutation_add_to_cell(self, target, temp_rows):
+        """Test add to cell mutation"""
+        from google.cloud.bigtable.data.mutations import AddToCell
+
+        row_key = b"add_to_cell"
+        family = TEST_AGGREGATE_FAMILY
+        qualifier = b"test-qualifier"
+        temp_rows.add_aggregate_row(row_key, family=family, qualifier=qualifier)
+        target.mutate_row(row_key, AddToCell(family, qualifier, 1, timestamp_micros=0))
+        encoded_result = self._retrieve_cell_value(target, row_key)
+        int_result = int.from_bytes(encoded_result, byteorder="big")
+        assert int_result == 1
+        target.mutate_row(row_key, AddToCell(family, qualifier, 9, timestamp_micros=0))
+        encoded_result = self._retrieve_cell_value(target, row_key)
+        int_result = int.from_bytes(encoded_result, byteorder="big")
+        assert int_result == 10
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits"
+    )
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("target")
+    @CrossSync._Sync_Impl.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    def test_sample_row_keys(self, client, target, temp_rows, column_split_config):
+        """Sample keys should return one sample per configured split, plus a final empty-key sample"""
+        temp_rows.add_row(b"row_key_1")
+        temp_rows.add_row(b"row_key_2")
+        results = target.sample_row_keys()
+        assert len(results) == len(column_split_config) + 1
+        for idx in range(len(column_split_config)):
+            assert results[idx][0] == column_split_config[idx]
+            assert isinstance(results[idx][1], int)
+        assert results[-1][0] == b""
+        assert isinstance(results[-1][1], int)
+
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("target")
+    def test_bulk_mutations_set_cell(self, client, target, temp_rows):
+        """Ensure cells can be set properly"""
+        from google.cloud.bigtable.data.mutations import RowMutationEntry
+
+        new_value = uuid.uuid4().hex.encode()
+        (row_key, mutation) = 
self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + target.bulk_mutate_rows([bulk_mutation]) + assert self._retrieve_cell_value(target, row_key) == new_value + + def test_bulk_mutations_raise_exception(self, client, target): + """If an invalid mutation is passed, an exception should be raised""" + from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + row_key = uuid.uuid4().hex.encode() + mutation = SetCell( + family="nonexistent", qualifier=b"test-qualifier", new_value=b"" + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + with pytest.raises(MutationsExceptionGroup) as exc: + target.bulk_mutate_rows([bulk_mutation]) + assert len(exc.value.exceptions) == 1 + entry_error = exc.value.exceptions[0] + assert isinstance(entry_error, FailedMutationEntryError) + assert entry_error.index == 0 + assert entry_error.entry == bulk_mutation + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_context_manager(self, client, target, temp_rows): + """test batcher with context manager. Should flush on exit""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, mutation) = self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + (row_key2, mutation2) = self._create_row_and_mutation( + target, temp_rows, new_value=new_value2 + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + with target.mutations_batcher() as batcher: + batcher.append(bulk_mutation) + batcher.append(bulk_mutation2) + assert self._retrieve_cell_value(target, row_key) == new_value + assert len(batcher._staged_entries) == 0 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_timer_flush(self, client, target, temp_rows): + """batch should occur after flush_interval seconds""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + (row_key, mutation) = self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + flush_interval = 0.1 + with target.mutations_batcher(flush_interval=flush_interval) as batcher: + batcher.append(bulk_mutation) + CrossSync._Sync_Impl.yield_to_event_loop() + assert len(batcher._staged_entries) == 1 + CrossSync._Sync_Impl.sleep(flush_interval + 0.1) + assert len(batcher._staged_entries) == 0 + assert self._retrieve_cell_value(target, row_key) == new_value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_count_flush(self, client, target, temp_rows): + """batch should flush after flush_limit_mutation_count mutations""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, 
mutation) = self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + (row_key2, mutation2) = self._create_row_and_mutation( + target, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + with target.mutations_batcher(flush_limit_mutation_count=2) as batcher: + batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + assert len(batcher._staged_entries) == 1 + batcher.append(bulk_mutation2) + assert len(batcher._flush_jobs) == 1 + for future in list(batcher._flush_jobs): + future + future.result() + assert len(batcher._staged_entries) == 0 + assert len(batcher._flush_jobs) == 0 + assert self._retrieve_cell_value(target, row_key) == new_value + assert self._retrieve_cell_value(target, row_key2) == new_value2 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_bytes_flush(self, client, target, temp_rows): + """batch should flush after flush_limit_bytes bytes""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, mutation) = self._create_row_and_mutation( + target, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + (row_key2, mutation2) = self._create_row_and_mutation( + target, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + flush_limit = bulk_mutation.size() + bulk_mutation2.size() - 1 + with target.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: + batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + assert len(batcher._staged_entries) == 1 + batcher.append(bulk_mutation2) + assert len(batcher._flush_jobs) == 1 + assert len(batcher._staged_entries) == 0 + for future in list(batcher._flush_jobs): + future + future.result() + assert self._retrieve_cell_value(target, row_key) == new_value + assert self._retrieve_cell_value(target, row_key2) == new_value2 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + def test_mutations_batcher_no_flush(self, client, target, temp_rows): + """test with no flush requirements met""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + start_value = b"unchanged" + (row_key, mutation) = self._create_row_and_mutation( + target, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + (row_key2, mutation2) = self._create_row_and_mutation( + target, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + size_limit = bulk_mutation.size() + bulk_mutation2.size() + 1 + with target.mutations_batcher( + flush_limit_bytes=size_limit, flush_limit_mutation_count=3, flush_interval=1 + ) as batcher: + batcher.append(bulk_mutation) + assert len(batcher._staged_entries) == 1 + batcher.append(bulk_mutation2) + assert len(batcher._flush_jobs) == 0 + CrossSync._Sync_Impl.yield_to_event_loop() + assert len(batcher._staged_entries) == 2 + assert len(batcher._flush_jobs) == 0 + assert self._retrieve_cell_value(target, row_key) == start_value + assert self._retrieve_cell_value(target, row_key2) == start_value + + @pytest.mark.usefixtures("client") + 
@pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_large_batch(self, client, target, temp_rows): + """test batcher with large batch of mutations""" + from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell + + add_mutation = SetCell( + family=TEST_FAMILY, qualifier=b"test-qualifier", new_value=b"a" + ) + row_mutations = [] + for i in range(50000): + row_key = uuid.uuid4().hex.encode() + row_mutations.append(RowMutationEntry(row_key, [add_mutation])) + temp_rows.rows.append(row_key) + with target.mutations_batcher() as batcher: + for mutation in row_mutations: + batcher.append(mutation) + assert len(batcher._staged_entries) == 0 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @pytest.mark.parametrize( + "start,increment,expected", + [ + (0, 0, 0), + (0, 1, 1), + (0, -1, -1), + (1, 0, 1), + (0, -100, -100), + (0, 3000, 3000), + (10, 4, 14), + (_MAX_INCREMENT_VALUE, -_MAX_INCREMENT_VALUE, 0), + (_MAX_INCREMENT_VALUE, 2, -_MAX_INCREMENT_VALUE), + (-_MAX_INCREMENT_VALUE, -2, _MAX_INCREMENT_VALUE), + ], + ) + def test_read_modify_write_row_increment( + self, client, target, temp_rows, start, increment, expected + ): + """test read_modify_write_row""" + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) + rule = IncrementRule(family, qualifier, increment) + result = target.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert int(result[0]) == expected + assert self._retrieve_cell_value(target, row_key) == result[0].value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @pytest.mark.parametrize( + "start,append,expected", + [ + (b"", b"", b""), + ("", "", b""), + (b"abc", b"123", b"abc123"), + (b"abc", "123", b"abc123"), + ("", b"1", b"1"), + (b"abc", "", b"abc"), + (b"hello", b"world", b"helloworld"), + ], + ) + def test_read_modify_write_row_append( + self, client, target, temp_rows, start, append, expected + ): + """test read_modify_write_row""" + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) + rule = AppendValueRule(family, qualifier, append) + result = target.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert result[0].value == expected + assert self._retrieve_cell_value(target, row_key) == result[0].value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + def test_read_modify_write_row_chained(self, client, target, temp_rows): + """test read_modify_write_row with multiple rules""" + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + start_amount = 1 + increment_amount = 10 + temp_rows.add_row( + row_key, value=start_amount, family=family, 
qualifier=qualifier + ) + rule = [ + IncrementRule(family, qualifier, increment_amount), + AppendValueRule(family, qualifier, "hello"), + AppendValueRule(family, qualifier, "world"), + AppendValueRule(family, qualifier, "!"), + ] + result = target.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert result[0].family == family + assert result[0].qualifier == qualifier + assert ( + result[0].value + == (start_amount + increment_amount).to_bytes(8, "big", signed=True) + + b"helloworld!" + ) + assert self._retrieve_cell_value(target, row_key) == result[0].value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + @pytest.mark.parametrize( + "start_val,predicate_range,expected_result", + [(1, (0, 2), True), (-1, (0, 2), False)], + ) + def test_check_and_mutate( + self, client, target, temp_rows, start_val, predicate_range, expected_result + ): + """test that check_and_mutate_row applies the right mutations and returns the right result""" + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + temp_rows.add_row(row_key, value=start_val, family=family, qualifier=qualifier) + false_mutation_value = b"false-mutation-value" + false_mutation = SetCell( + family=TEST_FAMILY, qualifier=qualifier, new_value=false_mutation_value + ) + true_mutation_value = b"true-mutation-value" + true_mutation = SetCell( + family=TEST_FAMILY, qualifier=qualifier, new_value=true_mutation_value + ) + predicate = ValueRangeFilter(predicate_range[0], predicate_range[1]) + result = target.check_and_mutate_row( + row_key, + predicate, + true_case_mutations=true_mutation, + false_case_mutations=false_mutation, + ) + assert result == expected_result + expected_value = ( + true_mutation_value if expected_result else false_mutation_value + ) + assert self._retrieve_cell_value(target, row_key) == expected_value + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", + ) + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("target") + def test_check_and_mutate_empty_request(self, client, target): + """check_and_mutate with no true or false mutations should raise an error""" + from google.api_core import exceptions + + with pytest.raises(exceptions.InvalidArgument) as e: + target.check_and_mutate_row( + b"row_key", None, true_case_mutations=None, false_case_mutations=None + ) + assert "No mutations provided" in str(e.value) + + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_stream(self, target, temp_rows): + """Ensure that the read_rows_stream method works""" + temp_rows.add_row(b"row_key_1") + temp_rows.add_row(b"row_key_2") + generator = target.read_rows_stream({}) + first_row = generator.__next__() + second_row = generator.__next__() + assert first_row.row_key == b"row_key_1" + assert second_row.row_key == b"row_key_2" + with pytest.raises(CrossSync._Sync_Impl.StopIteration): + generator.__next__() + + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows(self, target, temp_rows): + """Ensure that the read_rows method works""" + temp_rows.add_row(b"row_key_1") + temp_rows.add_row(b"row_key_2") + row_list =
target.read_rows({}) + assert len(row_list) == 2 + assert row_list[0].row_key == b"row_key_1" + assert row_list[1].row_key == b"row_key_2" + + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_sharded_simple(self, target, temp_rows): + """Test read rows sharded with two queries""" + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"]) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"]) + row_list = target.read_rows_sharded([query1, query2]) + assert len(row_list) == 4 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"c" + assert row_list[2].row_key == b"b" + assert row_list[3].row_key == b"d" + + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_sharded_from_sample(self, target, temp_rows): + """Test end-to-end sharding""" + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import RowRange + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + table_shard_keys = target.sample_row_keys() + query = ReadRowsQuery(row_ranges=[RowRange(start_key=b"b", end_key=b"z")]) + shard_queries = query.shard(table_shard_keys) + row_list = target.read_rows_sharded(shard_queries) + assert len(row_list) == 3 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + assert row_list[2].row_key == b"d" + + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_sharded_filters_limits(self, target, temp_rows): + """Test read rows sharded with filters and limits""" + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + label_filter1 = ApplyLabelFilter("first") + label_filter2 = ApplyLabelFilter("second") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"], limit=1, row_filter=label_filter1) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"], row_filter=label_filter2) + row_list = target.read_rows_sharded([query1, query2]) + assert len(row_list) == 3 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"b" + assert row_list[2].row_key == b"d" + assert row_list[0][0].labels == ["first"] + assert row_list[1][0].labels == ["second"] + assert row_list[2][0].labels == ["second"] + + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_range_query(self, target, temp_rows): + """Ensure that read_rows works with a row range query""" + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + query = ReadRowsQuery(row_ranges=RowRange(start_key=b"b", end_key=b"d")) + row_list = target.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + +
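+ # Note: RowRange treats end_key as exclusive by default, which is why the range query above returns b"b" and b"c" but not b"d". +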
@pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_single_key_query(self, target, temp_rows): + """Ensure that the read_rows method works with a specific row key query""" + from google.cloud.bigtable.data import ReadRowsQuery + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + query = ReadRowsQuery(row_keys=[b"a", b"c"]) + row_list = target.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"c" + + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_with_filter(self, target, temp_rows): + """ensure filters are applied""" + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + expected_label = "test-label" + row_filter = ApplyLabelFilter(expected_label) + query = ReadRowsQuery(row_filter=row_filter) + row_list = target.read_rows(query) + assert len(row_list) == 4 + for row in row_list: + assert row[0].labels == [expected_label] + + @pytest.mark.usefixtures("target") + def test_read_rows_stream_close(self, target, temp_rows): + """Ensure that the read_rows_stream can be closed""" + from google.cloud.bigtable.data import ReadRowsQuery + + temp_rows.add_row(b"row_key_1") + temp_rows.add_row(b"row_key_2") + query = ReadRowsQuery() + generator = target.read_rows_stream(query) + first_row = generator.__next__() + assert first_row.row_key == b"row_key_1" + generator.close() + with pytest.raises(CrossSync._Sync_Impl.StopIteration): + generator.__next__() + + @pytest.mark.usefixtures("target") + def test_read_row(self, target, temp_rows): + """Test read_row (single row helper)""" + from google.cloud.bigtable.data import Row + + temp_rows.add_row(b"row_key_1", value=b"value") + row = target.read_row(b"row_key_1") + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", + ) + @pytest.mark.usefixtures("target") + def test_read_row_missing(self, target): + """Test read_row when row does not exist""" + from google.api_core import exceptions + + row_key = "row_key_not_exist" + result = target.read_row(row_key) + assert result is None + with pytest.raises(exceptions.InvalidArgument) as e: + target.read_row("") + assert "Row keys must be non-empty" in str(e) + + @pytest.mark.usefixtures("target") + def test_read_row_w_filter(self, target, temp_rows): + """Test read_row with a row filter applied""" + from google.cloud.bigtable.data import Row + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + temp_rows.add_row(b"row_key_1", value=b"value") + expected_label = "test-label" + label_filter = ApplyLabelFilter(expected_label) + row = target.read_row(b"row_key_1", row_filter=label_filter) + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + assert row.cells[0].labels == [expected_label] + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", + ) + @pytest.mark.usefixtures("target") + def test_row_exists(self, target,
temp_rows): + """Test row_exists with rows that exist and don't exist""" + from google.api_core import exceptions + + assert target.row_exists(b"row_key_1") is False + temp_rows.add_row(b"row_key_1") + assert target.row_exists(b"row_key_1") is True + assert target.row_exists("row_key_1") is True + assert target.row_exists(b"row_key_2") is False + assert target.row_exists("row_key_2") is False + assert target.row_exists("3") is False + temp_rows.add_row(b"3") + assert target.row_exists(b"3") is True + with pytest.raises(exceptions.InvalidArgument) as e: + target.row_exists("") + assert "Row keys must be non-empty" in str(e) + + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @pytest.mark.parametrize( + "cell_value,filter_input,expect_match", + [ + (b"abc", b"abc", True), + (b"abc", "abc", True), + (b".", ".", True), + (".*", ".*", True), + (".*", b".*", True), + ("a", ".*", False), + (b".*", b".*", True), + ("\\a", "\\a", True), + (b"\xe2\x98\x83", "☃", True), + ("☃", "☃", True), + ("\\C☃", "\\C☃", True), + (1, 1, True), + (2, 1, False), + (68, 68, True), + ("D", 68, False), + (68, "D", False), + (-1, -1, True), + (2852126720, 2852126720, True), + (-1431655766, -1431655766, True), + (-1431655766, -1, False), + ], + ) + def test_literal_value_filter( + self, target, temp_rows, cell_value, filter_input, expect_match + ): + """Literal value filter does complex escaping on re2 strings. + Make sure inputs are properly interpreted by the server""" + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable.data import ReadRowsQuery + + f = LiteralValueFilter(filter_input) + temp_rows.add_row(b"row_key_1", value=cell_value) + query = ReadRowsQuery(row_filter=f) + row_list = target.read_rows(query) + assert len(row_list) == bool( + expect_match + ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't support authorized views", + ) + def test_authorized_view_unauthenticated( + self, client, authorized_view_id, instance_id, table_id + ): + """Requesting a family outside the authorized family_subset should raise an exception""" + from google.cloud.bigtable.data.mutations import SetCell + + with client.get_authorized_view( + instance_id, table_id, authorized_view_id + ) as view: + mutation = SetCell(family="unauthorized", qualifier="q", new_value="v") + with pytest.raises(PermissionDenied) as e: + view.mutate_row(b"row-key", mutation) + assert "outside the Authorized View" in e.value.message + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) + @pytest.mark.usefixtures("client") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_execute_query_simple(self, client, table_id, instance_id): + result = client.execute_query("SELECT 1 AS a, 'foo' AS b", instance_id) + rows = [r for r in result] + assert len(rows) == 1 + row = rows[0] + assert row["a"] == 1 + assert row["b"] == "foo" + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_execute_against_target( + self, client, instance_id, table_id, temp_rows, column_family_config + ): +
temp_rows.add_row(b"row_key_1") + result = client.execute_query("SELECT * FROM `" + table_id + "`", instance_id) + rows = [r for r in result] + assert len(rows) == 1 + assert rows[0]["_key"] == b"row_key_1" + family_map = rows[0][TEST_FAMILY] + assert len(family_map) == 1 + assert family_map[b"q"] == b"test-value" + assert len(rows[0][TEST_FAMILY_2]) == 0 + md = result.metadata + assert len(md) == len(column_family_config) + 1 + assert md["_key"].column_type == SqlType.Bytes() + assert md[TEST_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + assert md[TEST_FAMILY_2].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Int64() + ) + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) + @pytest.mark.usefixtures("client") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_execute_query_params(self, client, table_id, instance_id): + query = "SELECT @stringParam AS strCol, @bytesParam as bytesCol, @int64Param AS intCol, @float32Param AS float32Col, @float64Param AS float64Col, @boolParam AS boolCol, @tsParam AS tsCol, @dateParam AS dateCol, @byteArrayParam AS byteArrayCol, @stringArrayParam AS stringArrayCol, @intArrayParam AS intArrayCol, @float32ArrayParam AS float32ArrayCol, @float64ArrayParam AS float64ArrayCol, @boolArrayParam AS boolArrayCol, @tsArrayParam AS tsArrayCol, @dateArrayParam AS dateArrayCol" + parameters = { + "stringParam": "foo", + "bytesParam": b"bar", + "int64Param": 12, + "float32Param": 1.1, + "float64Param": 1.2, + "boolParam": True, + "tsParam": datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + "dateParam": datetime.date(2025, 1, 16), + "byteArrayParam": [b"foo", b"bar", None], + "stringArrayParam": ["foo", "bar", None], + "intArrayParam": [1, None, 2], + "float32ArrayParam": [1.2, None, 1.3], + "float64ArrayParam": [1.4, None, 1.5], + "boolArrayParam": [None, False, True], + "tsArrayParam": [ + datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + datetime.datetime.fromtimestamp(2000, tz=datetime.timezone.utc), + None, + ], + "dateArrayParam": [ + datetime.date(2025, 1, 16), + datetime.date(2025, 1, 17), + None, + ], + } + param_types = { + "stringParam": SqlType.String(), + "bytesParam": SqlType.Bytes(), + "int64Param": SqlType.Int64(), + "float32Param": SqlType.Float32(), + "float64Param": SqlType.Float64(), + "boolParam": SqlType.Bool(), + "tsParam": SqlType.Timestamp(), + "dateParam": SqlType.Date(), + "byteArrayParam": SqlType.Array(SqlType.Bytes()), + "stringArrayParam": SqlType.Array(SqlType.String()), + "intArrayParam": SqlType.Array(SqlType.Int64()), + "float32ArrayParam": SqlType.Array(SqlType.Float32()), + "float64ArrayParam": SqlType.Array(SqlType.Float64()), + "boolArrayParam": SqlType.Array(SqlType.Bool()), + "tsArrayParam": SqlType.Array(SqlType.Timestamp()), + "dateArrayParam": SqlType.Array(SqlType.Date()), + } + result = client.execute_query( + query, instance_id, parameters=parameters, parameter_types=param_types + ) + rows = [r for r in result] + assert len(rows) == 1 + row = rows[0] + assert row["strCol"] == parameters["stringParam"] + assert row["bytesCol"] == parameters["bytesParam"] + assert row["intCol"] == parameters["int64Param"] + assert row["float32Col"] == pytest.approx(parameters["float32Param"]) + assert row["float64Col"] == 
pytest.approx(parameters["float64Param"]) + assert row["boolCol"] == parameters["boolParam"] + assert row["tsCol"] == parameters["tsParam"] + assert row["dateCol"] == date_pb2.Date(year=2025, month=1, day=16) + assert row["stringArrayCol"] == parameters["stringArrayParam"] + assert row["byteArrayCol"] == parameters["byteArrayParam"] + assert row["intArrayCol"] == parameters["intArrayParam"] + assert row["float32ArrayCol"] == pytest.approx(parameters["float32ArrayParam"]) + assert row["float64ArrayCol"] == pytest.approx(parameters["float64ArrayParam"]) + assert row["boolArrayCol"] == parameters["boolArrayParam"] + assert row["tsArrayCol"] == parameters["tsArrayParam"] + assert row["dateArrayCol"] == [ + date_pb2.Date(year=2025, month=1, day=16), + date_pb2.Date(year=2025, month=1, day=17), + None, + ] + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_execute_metadata_on_empty_response( + self, client, instance_id, table_id, temp_rows, column_family_config + ): + temp_rows.add_row(b"row_key_1") + result = client.execute_query( + "SELECT * FROM `" + table_id + "` WHERE _key='non-existent'", instance_id + ) + rows = [r for r in result] + assert len(rows) == 0 + md = result.metadata + assert len(md) == len(column_family_config) + 1 + assert md["_key"].column_type == SqlType.Bytes() + assert md[TEST_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + assert md[TEST_FAMILY_2].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Int64() + ) diff --git a/tests/system/v2_client/__init__.py b/tests/system/v2_client/__init__.py new file mode 100644 index 000000000..4de65971c --- /dev/null +++ b/tests/system/v2_client/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/system/_helpers.py b/tests/system/v2_client/_helpers.py similarity index 88% rename from tests/system/_helpers.py rename to tests/system/v2_client/_helpers.py index 95261879e..e792def15 100644 --- a/tests/system/_helpers.py +++ b/tests/system/v2_client/_helpers.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import datetime +from datetime import datetime, timezone import grpc from google.api_core import exceptions from google.cloud import exceptions as core_exceptions -from google.cloud._helpers import UTC from test_utils import retry @@ -41,7 +40,5 @@ def _retry_on_unavailable(exc): def label_stamp(): return ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") + datetime.now(timezone.utc).replace(microsecond=0).strftime("%Y-%m-%dt%H-%M-%S") ) diff --git a/tests/system/v2_client/conftest.py b/tests/system/v2_client/conftest.py new file mode 100644 index 000000000..f39fcba88 --- /dev/null +++ b/tests/system/v2_client/conftest.py @@ -0,0 +1,209 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pytest +from test_utils.system import unique_resource_id + +from google.cloud.bigtable.client import Client +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from . import _helpers + + +@pytest.fixture(scope="session") +def in_emulator(): + return os.getenv(BIGTABLE_EMULATOR) is not None + + +@pytest.fixture(scope="session") +def kms_key_name(): + return os.getenv("KMS_KEY_NAME") + + +@pytest.fixture(scope="session") +def with_kms_key_name(kms_key_name): + if kms_key_name is None: + pytest.skip("Test requires KMS_KEY_NAME environment variable") + return kms_key_name + + +@pytest.fixture(scope="session") +def skip_on_emulator(in_emulator): + if in_emulator: + pytest.skip("Emulator does not support this feature") + + +@pytest.fixture(scope="session") +def unique_suffix(): + return unique_resource_id("-") + + +@pytest.fixture(scope="session") +def location_id(): + return "us-central1-c" + + +@pytest.fixture(scope="session") +def serve_nodes(): + return 3 + + +@pytest.fixture(scope="session") +def label_key(): + return "python-system" + + +@pytest.fixture(scope="session") +def instance_labels(label_key): + return {label_key: _helpers.label_stamp()} + + +@pytest.fixture(scope="session") +def admin_client(): + return Client(admin=True) + + +@pytest.fixture(scope="session") +def service_account(admin_client): + from google.oauth2.service_account import Credentials + + if not isinstance(admin_client._credentials, Credentials): + pytest.skip("These tests require a service account credential") + return admin_client._credentials + + +@pytest.fixture(scope="session") +def admin_instance_id(unique_suffix): + return f"g-c-p{unique_suffix}" + + +@pytest.fixture(scope="session") +def admin_cluster_id(admin_instance_id): + return f"{admin_instance_id}-cluster" + + +@pytest.fixture(scope="session") +def admin_instance(admin_client, admin_instance_id, instance_labels): + return admin_client.instance(admin_instance_id, labels=instance_labels) + + +@pytest.fixture(scope="session") +def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): + return admin_instance.cluster( + admin_cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + ) + + +@pytest.fixture(scope="session") +def 
admin_cluster_with_autoscaling( + admin_instance, + admin_cluster_id, + location_id, + min_serve_nodes, + max_serve_nodes, + cpu_utilization_percent, +): + return admin_instance.cluster( + admin_cluster_id, + location_id=location_id, + min_serve_nodes=min_serve_nodes, + max_serve_nodes=max_serve_nodes, + cpu_utilization_percent=cpu_utilization_percent, + ) + + +@pytest.fixture(scope="session") +def admin_instance_populated(admin_instance, admin_cluster, in_emulator): + # Emulator does not support instance admin operations (create / delete). + # See: https://cloud.google.com/bigtable/docs/emulator + if not in_emulator: + operation = admin_instance.create(clusters=[admin_cluster]) + operation.result(timeout=240) + + yield admin_instance + + if not in_emulator: + _helpers.retry_429(admin_instance.delete)() + + +@pytest.fixture(scope="session") +def data_client(): + return Client(admin=False) + + +@pytest.fixture(scope="session") +def data_instance_id(unique_suffix): + return f"g-c-p-d{unique_suffix}" + + +@pytest.fixture(scope="session") +def data_cluster_id(data_instance_id): + return f"{data_instance_id}-cluster" + + +@pytest.fixture(scope="session") +def data_instance_populated( + admin_client, + data_instance_id, + instance_labels, + data_cluster_id, + location_id, + serve_nodes, + in_emulator, +): + instance = admin_client.instance(data_instance_id, labels=instance_labels) + # Emulator does not support instance admin operations (create / delete). + # See: https://cloud.google.com/bigtable/docs/emulator + if not in_emulator: + cluster = instance.cluster( + data_cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + ) + operation = instance.create(clusters=[cluster]) + operation.result(timeout=240) + + yield instance + + if not in_emulator: + _helpers.retry_429(instance.delete)() + + +@pytest.fixture(scope="function") +def instances_to_delete(): + instances_to_delete = [] + + yield instances_to_delete + + for instance in instances_to_delete: + _helpers.retry_429(instance.delete)() + + +@pytest.fixture(scope="session") +def min_serve_nodes(in_emulator): + return 1 + + +@pytest.fixture(scope="session") +def max_serve_nodes(in_emulator): + return 8 + + +@pytest.fixture(scope="session") +def cpu_utilization_percent(in_emulator): + return 10 diff --git a/tests/system/test_data_api.py b/tests/system/v2_client/test_data_api.py similarity index 88% rename from tests/system/test_data_api.py rename to tests/system/v2_client/test_data_api.py index 2ca7e1504..c012eb32a 100644 --- a/tests/system/test_data_api.py +++ b/tests/system/v2_client/test_data_api.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import datetime +from datetime import datetime, timedelta, timezone import operator import pytest @@ -62,8 +62,8 @@ def rows_to_delete(): def test_table_read_rows_filter_millis(data_table): from google.cloud.bigtable import row_filters - end = datetime.datetime.now() - start = end - datetime.timedelta(minutes=60) + end = datetime.now() + start = end - timedelta(minutes=60) timestamp_range = row_filters.TimestampRange(start=start, end=end) timefilter = row_filters.TimestampRangeFilter(timestamp_range) row_data = data_table.read_rows(filter_=timefilter) @@ -233,20 +233,19 @@ def test_table_read_row_large_cell(data_table, rows_to_delete, skip_on_emulator) def _write_to_row(row1, row2, row3, row4): from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime - from google.cloud._helpers import UTC from google.cloud.bigtable.row_data import Cell - timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) + timestamp1 = datetime.now(timezone.utc) timestamp1_micros = _microseconds_from_datetime(timestamp1) # Truncate to millisecond granularity. timestamp1_micros -= timestamp1_micros % 1000 timestamp1 = _datetime_from_microseconds(timestamp1_micros) # 1000 microseconds is a millisecond - timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) + timestamp2 = timestamp1 + timedelta(microseconds=1000) timestamp2_micros = _microseconds_from_datetime(timestamp2) - timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) + timestamp3 = timestamp1 + timedelta(microseconds=2000) timestamp3_micros = _microseconds_from_datetime(timestamp3) - timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) + timestamp4 = timestamp1 + timedelta(microseconds=3000) timestamp4_micros = _microseconds_from_datetime(timestamp4) if row1 is not None: @@ -381,3 +380,39 @@ def test_access_with_non_admin_client(data_client, data_instance_id, data_table_ instance = data_client.instance(data_instance_id) table = instance.table(data_table_id) assert table.read_row("nonesuch") is None # no raise + + +def test_mutations_batcher_threading(data_table, rows_to_delete): + """ + Test the mutations batcher by sending a bunch of mutations using different + flush methods + """ + import mock + import time + from google.cloud.bigtable.batcher import MutationsBatcher + + num_sent = 20 + all_results = [] + + def callback(results): + all_results.extend(results) + + # override flow control max elements + with mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", 2): + with MutationsBatcher( + data_table, + flush_count=5, + flush_interval=0.07, + batch_completed_callback=callback, + ) as batcher: + # send mutations in a way that timed flushes and count flushes interleave + for i in range(num_sent): + row = data_table.direct_row("row{}".format(i)) + row.set_cell( + COLUMN_FAMILY_ID1, COL_NAME1, "val{}".format(i).encode("utf-8") + ) + rows_to_delete.append(row) + batcher.mutate(row) + time.sleep(0.01) + # ensure all mutations were sent + assert len(all_results) == num_sent diff --git a/tests/system/test_instance_admin.py b/tests/system/v2_client/test_instance_admin.py similarity index 99% rename from tests/system/test_instance_admin.py rename to tests/system/v2_client/test_instance_admin.py index e5e311213..bd5c7e912 100644 --- a/tests/system/test_instance_admin.py +++ b/tests/system/v2_client/test_instance_admin.py @@ -28,7 +28,6 @@ def _create_app_profile_helper( allow_transactional_writes=None, ignore_warnings=None, ): - app_profile = 
instance.app_profile( app_profile_id=app_profile_id, routing_policy_type=routing_policy_type, diff --git a/tests/system/test_table_admin.py b/tests/system/v2_client/test_table_admin.py similarity index 100% rename from tests/system/test_table_admin.py rename to tests/system/v2_client/test_table_admin.py diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index 89a37dc92..cbf94b283 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/admin_overlay/my_oneof_message.py b/tests/unit/admin_overlay/my_oneof_message.py new file mode 100644 index 000000000..25667cfca --- /dev/null +++ b/tests/unit/admin_overlay/my_oneof_message.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto + +from google.cloud.bigtable_admin_v2.utils import oneof_message + +__protobuf__ = proto.module( + package="test.oneof.v1", + manifest={ + "MyOneofMessage", + }, +) + + +# Foo and Bar belong to oneof foobar, and baz is independent. +class MyOneofMessage(oneof_message.OneofMessage): + foo: int = proto.Field( + proto.INT32, + number=1, + oneof="foobar", + ) + + bar: int = proto.Field( + proto.INT32, + number=2, + oneof="foobar", + ) + + baz: int = proto.Field( + proto.INT32, + number=3, + ) diff --git a/tests/unit/admin_overlay/test_admin_packaging.py b/tests/unit/admin_overlay/test_admin_packaging.py new file mode 100644 index 000000000..729a92b5c --- /dev/null +++ b/tests/unit/admin_overlay/test_admin_packaging.py @@ -0,0 +1,41 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib + +import pytest + + +@pytest.mark.parametrize( + "module", ["google.cloud.bigtable_admin", "google.cloud.bigtable_admin_v2"] +) +def test_admin_overlay_imports(module): + # Simulate from import dynamically using importlib + mod = importlib.import_module(module) + + # Check that the import aliasing works as expected for overlay/autogenerated clients/types. 
+ classes_to_modules = { + "BigtableTableAdminClient": "google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client", + "RestoreTableOperation": "google.cloud.bigtable_admin_v2.overlay.types.restore_table", + "BigtableInstanceAdminClient": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.client", + "RestoreTableRequest": "google.cloud.bigtable_admin_v2.types.bigtable_table_admin", + } + + for cls_name, submodule_name in classes_to_modules.items(): + cls = getattr(mod, cls_name) + submodule = importlib.import_module(submodule_name) + assert cls == getattr(submodule, cls_name) + + # Check that from import * has the class inside. + assert cls_name in mod.__all__ diff --git a/tests/unit/admin_overlay/test_async_client.py b/tests/unit/admin_overlay/test_async_client.py new file mode 100644 index 000000000..0d844a9e4 --- /dev/null +++ b/tests/unit/admin_overlay/test_async_client.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401 +except ImportError: # pragma: NO COVER + import mock + +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth.credentials import AnonymousCredentials +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.async_client import ( + BigtableTableAdminAsyncClient, + DEFAULT_CLIENT_INFO, +) +from google.cloud.bigtable_admin_v2.overlay.types import ( + async_restore_table, + wait_for_consistency_request, +) + +from google.cloud.bigtable import __version__ as bigtable_version + +from test_async_consistency import ( + FALSE_CONSISTENCY_RESPONSE, + TRUE_CONSISTENCY_RESPONSE, +) + +import pytest + + +PARENT_NAME = "my_parent" +TABLE_NAME = "my_table" +CONSISTENCY_TOKEN = "abcdefg" + + +def _make_client(**kwargs): + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + return BigtableTableAdminAsyncClient(**kwargs) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + ( + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_table_admin_async_client_client_version( + transport_class, transport_name +): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + _make_client(transport=transport_name) + + # call_args.kwargs is not supported in Python 3.7, so find them from the tuple + # instead. It's always the last item in the call_args tuple. 
+ transport_init_call_kwargs = patched.call_args[-1] + assert transport_init_call_kwargs["client_info"] == DEFAULT_CLIENT_INFO + + assert ( + DEFAULT_CLIENT_INFO.client_library_version + == f"{bigtable_version}-admin-overlay-async" + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "kwargs", + [ + { + "request": bigtable_table_admin.RestoreTableRequest( + parent=PARENT_NAME, + table_id=TABLE_NAME, + ) + }, + { + "request": { + "parent": PARENT_NAME, + "table_id": TABLE_NAME, + }, + }, + { + "request": bigtable_table_admin.RestoreTableRequest( + parent=PARENT_NAME, + table_id=TABLE_NAME, + ), + "retry": mock.Mock(spec=retries.Retry), + "timeout": mock.Mock(spec=retries.Retry), + "metadata": [("foo", "bar")], + }, + ], +) +async def test_bigtable_table_admin_async_client_restore_table(kwargs): + client = _make_client() + + with mock.patch.object( + async_restore_table, "AsyncRestoreTableOperation", new_callable=mock.AsyncMock + ) as future_mock: + with mock.patch.object( + client._client, "_transport", new_callable=mock.AsyncMock + ) as transport_mock: + with mock.patch.object( + client, "_restore_table", new_callable=mock.AsyncMock + ) as restore_table_mock: + operation_mock = mock.Mock() + restore_table_mock.return_value = operation_mock + await client.restore_table(**kwargs) + + restore_table_mock.assert_called_once_with( + request=kwargs["request"], + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + future_mock.assert_called_once_with( + transport_mock.operations_client, operation_mock + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "kwargs,check_consistency_request_extras", + [ + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ) + }, + {}, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + standard_read_remote_writes=bigtable_table_admin.StandardReadRemoteWrites(), + ) + }, + { + "standard_read_remote_writes": bigtable_table_admin.StandardReadRemoteWrites(), + }, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + data_boost_read_local_writes=bigtable_table_admin.DataBoostReadLocalWrites(), + ) + }, + { + "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(), + }, + ), + ( + { + "request": { + "name": TABLE_NAME, + "data_boost_read_local_writes": {}, + } + }, + { + "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(), + }, + ), + ( + { + "name": TABLE_NAME, + }, + {}, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ), + "retry": mock.Mock(spec=retries.Retry), + "timeout": mock.Mock(spec=retries.Retry), + "metadata": [("foo", "bar")], + }, + {}, + ), + ], +) +async def test_bigtable_table_admin_async_client_wait_for_consistency( + kwargs, check_consistency_request_extras +): + client = _make_client() + poll_count = 3 + check_mock_side_effect = [FALSE_CONSISTENCY_RESPONSE] * (poll_count - 1) + check_mock_side_effect.append(TRUE_CONSISTENCY_RESPONSE) + + with mock.patch.object( + client, "generate_consistency_token", new_callable=mock.AsyncMock + ) as generate_mock: + with mock.patch.object( + client, "check_consistency", new_callable=mock.AsyncMock + ) as check_mock: + generate_mock.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token=CONSISTENCY_TOKEN, + ) + ) + + 
check_mock.side_effect = check_mock_side_effect + result = await client.wait_for_consistency(**kwargs) + + assert result is True + + generate_mock.assert_awaited_once_with( + bigtable_table_admin.GenerateConsistencyTokenRequest( + name=TABLE_NAME, + ), + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + + expected_check_consistency_request = ( + bigtable_table_admin.CheckConsistencyRequest( + name=TABLE_NAME, + consistency_token=CONSISTENCY_TOKEN, + **check_consistency_request_extras, + ) + ) + + check_mock.assert_awaited_with( + expected_check_consistency_request, + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + + +@pytest.mark.asyncio +async def test_bigtable_table_admin_async_client_wait_for_consistency_error_in_call(): + client = _make_client() + request = wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ) + + with pytest.raises(exceptions.GoogleAPICallError): + with mock.patch.object( + client, "generate_consistency_token", new_callable=mock.AsyncMock + ) as generate_mock: + generate_mock.side_effect = exceptions.DeadlineExceeded( + "Deadline Exceeded." + ) + await client.wait_for_consistency(request) + + with pytest.raises(exceptions.GoogleAPICallError): + with mock.patch.object( + client, "generate_consistency_token", new_callable=mock.AsyncMock + ) as generate_mock: + with mock.patch.object( + client, "check_consistency", new_callable=mock.AsyncMock + ) as check_mock: + generate_mock.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token=CONSISTENCY_TOKEN, + ) + ) + + check_mock.side_effect = exceptions.DeadlineExceeded( + "Deadline Exceeded." + ) + await client.wait_for_consistency(request) + + +@pytest.mark.asyncio +async def test_bigtable_table_admin_async_client_wait_for_consistency_user_error(): + client = _make_client() + with pytest.raises(ValueError): + await client.wait_for_consistency( + { + "name": TABLE_NAME, + }, + name=TABLE_NAME, + ) diff --git a/tests/unit/admin_overlay/test_async_consistency.py b/tests/unit/admin_overlay/test_async_consistency.py new file mode 100644 index 000000000..b64ae1a11 --- /dev/null +++ b/tests/unit/admin_overlay/test_async_consistency.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401 +except ImportError: # pragma: NO COVER + import mock + +from google.cloud.bigtable_admin_v2.overlay.types import async_consistency +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +import pytest + + +TRUE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse( + consistent=True +) + +FALSE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse( + consistent=False +) + + +def async_mock_check_consistency_callable(max_poll_count=1): + # Return False max_poll_count - 1 times, then True, for a total of + # max_poll_count calls. + side_effect = [FALSE_CONSISTENCY_RESPONSE] * (max_poll_count - 1) + side_effect.append(TRUE_CONSISTENCY_RESPONSE) + return mock.AsyncMock(spec=["__call__"], side_effect=side_effect) + + +@pytest.mark.asyncio +async def test_check_consistency_future_cancel(): + check_consistency_call = async_mock_check_consistency_callable() + future = async_consistency._AsyncCheckConsistencyPollingFuture( + check_consistency_call + ) + with pytest.raises(NotImplementedError): + future.cancel() + + with pytest.raises(NotImplementedError): + future.cancelled() + + +@pytest.mark.asyncio +async def test_check_consistency_future_result(): + times = 5 + check_consistency_call = async_mock_check_consistency_callable(times) + future = async_consistency._AsyncCheckConsistencyPollingFuture( + check_consistency_call + ) + is_consistent = await future.result() + + assert is_consistent + check_consistency_call.assert_has_calls([mock.call()] * times) + + # Check that calling result again doesn't produce more calls. + is_consistent = await future.result() + + assert is_consistent + check_consistency_call.assert_has_calls([mock.call()] * times) diff --git a/tests/unit/admin_overlay/test_async_restore_table.py b/tests/unit/admin_overlay/test_async_restore_table.py new file mode 100644 index 000000000..95799fc14 --- /dev/null +++ b/tests/unit/admin_overlay/test_async_restore_table.py @@ -0,0 +1,248 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401 +except ImportError: # pragma: NO COVER + import mock + +from google.longrunning import operations_pb2 +from google.rpc import status_pb2, code_pb2 + +from google.api_core import operation_async, exceptions +from google.api_core.future import async_future +from google.api_core.operations_v1 import operations_async_client +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin, table +from google.cloud.bigtable_admin_v2.overlay.types import async_restore_table + +import pytest + + +# Set up the mock operations +DEFAULT_MAX_POLL = 3 +RESTORE_TABLE_OPERATION_TABLE_NAME = "Test Table" +RESTORE_TABLE_OPERATION_NAME = "test/restore_table" +RESTORE_TABLE_OPERATION_METADATA = bigtable_table_admin.RestoreTableMetadata( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) +OPTIMIZE_RESTORED_TABLE_OPERATION_NAME = "test/optimize_restore_table" +OPTIMIZE_RESTORED_TABLE_METADATA = bigtable_table_admin.OptimizeRestoredTableMetadata( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) + +OPTIMIZE_RESTORED_TABLE_OPERATION_ID = "abcdefg" +RESTORE_TABLE_OPERATION_FINISHED_RESPONSE = table.Table( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) +RESTORE_TABLE_OPERATION_FINISHED_ERROR = status_pb2.Status( + code=code_pb2.DEADLINE_EXCEEDED, message="Deadline Exceeded" +) + + +def make_operation_proto( + name, done=False, metadata=None, response=None, error=None, **kwargs +): + operation_proto = operations_pb2.Operation(name=name, done=done, **kwargs) + + if metadata is not None: + operation_proto.metadata.Pack(metadata._pb) + + if response is not None: + operation_proto.response.Pack(response._pb) + + if error is not None: + operation_proto.error.CopyFrom(error) + + return operation_proto + + +RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=False, + metadata=RESTORE_TABLE_OPERATION_METADATA, +) + +OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO = make_operation_proto( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_NAME, + metadata=OPTIMIZE_RESTORED_TABLE_METADATA, +) + + +# Set up the mock operation client +def mock_restore_table_operation( + max_poll_count=DEFAULT_MAX_POLL, fail=False, has_optimize_operation=True +): + client = mock.AsyncMock(spec=operations_async_client.OperationsAsyncClient) + + # Set up the polling + side_effect = [RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO] * (max_poll_count - 1) + finished_operation_metadata = bigtable_table_admin.RestoreTableMetadata() + bigtable_table_admin.RestoreTableMetadata.copy_from( + finished_operation_metadata, RESTORE_TABLE_OPERATION_METADATA + ) + if has_optimize_operation: + finished_operation_metadata.optimize_table_operation_name = ( + OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + + if fail: + final_operation_proto = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=True, + metadata=finished_operation_metadata, + error=RESTORE_TABLE_OPERATION_FINISHED_ERROR, + ) + else: + final_operation_proto = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=True, + metadata=finished_operation_metadata, + response=RESTORE_TABLE_OPERATION_FINISHED_RESPONSE, + ) + side_effect.append(final_operation_proto) + refresh = mock.AsyncMock(spec=["__call__"], side_effect=side_effect) + cancel = mock.AsyncMock(spec=["__call__"]) + future = operation_async.AsyncOperation( + RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO, + refresh, + cancel, + 
result_type=table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, + ) + + # Set up the optimize_restored_table_operation + client.get_operation.side_effect = [OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO] + + return async_restore_table.AsyncRestoreTableOperation(client, future) + + +@pytest.mark.asyncio +async def test_async_restore_table_operation_client_success_has_optimize(): + restore_table_operation = mock_restore_table_operation() + + await restore_table_operation.result() + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + +@pytest.mark.asyncio +async def test_restore_table_operation_client_success_has_optimize_multiple_calls(): + restore_table_operation = mock_restore_table_operation() + + await restore_table_operation.result() + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + await restore_table_operation.optimize_restored_table_operation() + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + +@pytest.mark.asyncio +async def test_restore_table_operation_success_has_optimize_call_before_done(): + restore_table_operation = mock_restore_table_operation() + + with pytest.raises(exceptions.GoogleAPIError): + await restore_table_operation.optimize_restored_table_operation() + + restore_table_operation._operations_client.get_operation.assert_not_called() + + +@pytest.mark.asyncio +async def test_restore_table_operation_client_success_only_cache_after_finishing(): + restore_table_operation = mock_restore_table_operation() + + with pytest.raises(exceptions.GoogleAPIError): + await restore_table_operation.optimize_restored_table_operation() + + await restore_table_operation.result() + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + await restore_table_operation.optimize_restored_table_operation() + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + +@pytest.mark.asyncio +async def
test_restore_table_operation_success_no_optimize(): + restore_table_operation = mock_restore_table_operation(has_optimize_operation=False) + + await restore_table_operation.result() + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert optimize_restored_table_operation is None + restore_table_operation._operations_client.get_operation.assert_not_called() + + +@pytest.mark.asyncio +async def test_restore_table_operation_exception(): + restore_table_operation = mock_restore_table_operation( + fail=True, has_optimize_operation=False + ) + + with pytest.raises(exceptions.GoogleAPICallError): + await restore_table_operation.result() + + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert optimize_restored_table_operation is None + restore_table_operation._operations_client.get_operation.assert_not_called() diff --git a/tests/unit/admin_overlay/test_client.py b/tests/unit/admin_overlay/test_client.py new file mode 100644 index 000000000..07922b349 --- /dev/null +++ b/tests/unit/admin_overlay/test_client.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock + +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth.credentials import AnonymousCredentials +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client import ( + BigtableTableAdminClient, + DEFAULT_CLIENT_INFO, +) +from google.cloud.bigtable_admin_v2.overlay.types import ( + restore_table, + wait_for_consistency_request, +) + +from google.cloud.bigtable import __version__ as bigtable_version + +from test_consistency import ( + FALSE_CONSISTENCY_RESPONSE, + TRUE_CONSISTENCY_RESPONSE, +) + +import pytest + + +PARENT_NAME = "my_parent" +TABLE_NAME = "my_table" +CONSISTENCY_TOKEN = "abcdefg" + + +def _make_client(**kwargs): + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + return BigtableTableAdminClient(**kwargs) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + ( + transports.BigtableTableAdminGrpcTransport, + "grpc", + ), + ( + transports.BigtableTableAdminRestTransport, + "rest", + ), + ], +) +def test_bigtable_table_admin_client_client_version(transport_class, transport_name): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + _make_client(transport=transport_name) + + # call_args.kwargs is not supported in Python 3.7, so find them from the tuple + # instead. It's always the last item in the call_args tuple. 
+ transport_init_call_kwargs = patched.call_args[-1] + assert transport_init_call_kwargs["client_info"] == DEFAULT_CLIENT_INFO + + assert ( + DEFAULT_CLIENT_INFO.client_library_version + == f"{bigtable_version}-admin-overlay" + ) + + +@pytest.mark.parametrize( + "kwargs", + [ + { + "request": bigtable_table_admin.RestoreTableRequest( + parent=PARENT_NAME, + table_id=TABLE_NAME, + ) + }, + { + "request": { + "parent": PARENT_NAME, + "table_id": TABLE_NAME, + }, + }, + { + "request": bigtable_table_admin.RestoreTableRequest( + parent=PARENT_NAME, + table_id=TABLE_NAME, + ), + "retry": mock.Mock(spec=retries.Retry), + "timeout": mock.Mock(spec=retries.Retry), + "metadata": [("foo", "bar")], + }, + ], +) +def test_bigtable_table_admin_client_restore_table(kwargs): + client = _make_client() + + with mock.patch.object(restore_table, "RestoreTableOperation") as future_mock: + with mock.patch.object(client, "_transport") as transport_mock: + with mock.patch.object(client, "_restore_table") as restore_table_mock: + operation_mock = mock.Mock() + restore_table_mock.return_value = operation_mock + client.restore_table(**kwargs) + + restore_table_mock.assert_called_once_with( + request=kwargs["request"], + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + future_mock.assert_called_once_with( + transport_mock.operations_client, operation_mock + ) + + +@pytest.mark.parametrize( + "kwargs,check_consistency_request_extras", + [ + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ) + }, + {}, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + standard_read_remote_writes=bigtable_table_admin.StandardReadRemoteWrites(), + ) + }, + { + "standard_read_remote_writes": bigtable_table_admin.StandardReadRemoteWrites(), + }, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + data_boost_read_local_writes=bigtable_table_admin.DataBoostReadLocalWrites(), + ) + }, + { + "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(), + }, + ), + ( + { + "request": { + "name": TABLE_NAME, + "data_boost_read_local_writes": {}, + } + }, + { + "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(), + }, + ), + ( + { + "name": TABLE_NAME, + }, + {}, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ), + "retry": mock.Mock(spec=retries.Retry), + "timeout": mock.Mock(spec=retries.Retry), + "metadata": [("foo", "bar")], + }, + {}, + ), + ], +) +def test_bigtable_table_admin_client_wait_for_consistency( + kwargs, check_consistency_request_extras +): + client = _make_client() + poll_count = 3 + check_mock_side_effect = [FALSE_CONSISTENCY_RESPONSE] * (poll_count - 1) + check_mock_side_effect.append(TRUE_CONSISTENCY_RESPONSE) + + with mock.patch.object(client, "generate_consistency_token") as generate_mock: + with mock.patch.object(client, "check_consistency") as check_mock: + generate_mock.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token=CONSISTENCY_TOKEN, + ) + ) + + check_mock.side_effect = check_mock_side_effect + result = client.wait_for_consistency(**kwargs) + + assert result is True + + generate_mock.assert_called_once_with( + bigtable_table_admin.GenerateConsistencyTokenRequest( + name=TABLE_NAME, + ), + retry=kwargs.get("retry", 
gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + + expected_check_consistency_request = ( + bigtable_table_admin.CheckConsistencyRequest( + name=TABLE_NAME, + consistency_token=CONSISTENCY_TOKEN, + **check_consistency_request_extras, + ) + ) + + check_mock.assert_called_with( + expected_check_consistency_request, + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + + +def test_bigtable_table_admin_client_wait_for_consistency_error_in_call(): + client = _make_client() + request = wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ) + + with pytest.raises(exceptions.GoogleAPICallError): + with mock.patch.object(client, "generate_consistency_token") as generate_mock: + generate_mock.side_effect = exceptions.DeadlineExceeded( + "Deadline Exceeded." + ) + client.wait_for_consistency(request) + + with pytest.raises(exceptions.GoogleAPICallError): + with mock.patch.object(client, "generate_consistency_token") as generate_mock: + with mock.patch.object(client, "check_consistency") as check_mock: + generate_mock.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token=CONSISTENCY_TOKEN, + ) + ) + + check_mock.side_effect = exceptions.DeadlineExceeded( + "Deadline Exceeded." + ) + client.wait_for_consistency(request) + + +def test_bigtable_table_admin_client_wait_for_consistency_user_error(): + client = _make_client() + with pytest.raises(ValueError): + client.wait_for_consistency( + { + "name": TABLE_NAME, + }, + name=TABLE_NAME, + ) diff --git a/tests/unit/admin_overlay/test_consistency.py b/tests/unit/admin_overlay/test_consistency.py new file mode 100644 index 000000000..29bc0c481 --- /dev/null +++ b/tests/unit/admin_overlay/test_consistency.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock + +from google.cloud.bigtable_admin_v2.overlay.types import consistency +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +import pytest + + +TRUE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse( + consistent=True +) + +FALSE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse( + consistent=False +) + + +def mock_check_consistency_callable(max_poll_count=1): + # Return False max_poll_count - 1 times, then True, for a total of + # max_poll_count calls. 
+ side_effect = [FALSE_CONSISTENCY_RESPONSE] * (max_poll_count - 1) + side_effect.append(TRUE_CONSISTENCY_RESPONSE) + return mock.Mock(spec=["__call__"], side_effect=side_effect) + + +def test_check_consistency_future_cancel(): + check_consistency_call = mock_check_consistency_callable() + future = consistency._CheckConsistencyPollingFuture(check_consistency_call) + with pytest.raises(NotImplementedError): + future.cancel() + + with pytest.raises(NotImplementedError): + future.cancelled() + + +def test_check_consistency_future_result(): + times = 5 + check_consistency_call = mock_check_consistency_callable(times) + future = consistency._CheckConsistencyPollingFuture(check_consistency_call) + is_consistent = future.result() + + assert is_consistent + check_consistency_call.assert_has_calls([mock.call()] * times) + + # Check that calling result again doesn't produce more calls. + is_consistent = future.result() + + assert is_consistent + check_consistency_call.assert_has_calls([mock.call()] * times) diff --git a/tests/unit/admin_overlay/test_oneof_message.py b/tests/unit/admin_overlay/test_oneof_message.py new file mode 100644 index 000000000..b9c521235 --- /dev/null +++ b/tests/unit/admin_overlay/test_oneof_message.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from google.cloud.bigtable_admin_v2.types import GcRule +from google.protobuf import duration_pb2 + +import my_oneof_message + +import pytest + + +# The following proto bytestring was constructed running printproto in +# text-to-binary mode on the following textproto for GcRule: +# +# intersection { +# rules { +# max_num_versions: 1234 +# } +# rules { +# max_age { +# seconds: 12345 +# } +# } +# } +GCRULE_RAW_PROTO_BYTESTRING = b"\x1a\x0c\n\x03\x08\xd2\t\n\x05\x12\x03\x08\xb9`" +INITIAL_VALUE = 123 +FINAL_VALUE = 456 + + +@pytest.fixture +def default_msg(): + return my_oneof_message.MyOneofMessage() + + +@pytest.fixture +def foo_msg(): + return my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE) + + +def test_oneof_message_setattr_oneof_no_conflict(default_msg): + default_msg.foo = INITIAL_VALUE + default_msg.baz = INITIAL_VALUE + assert default_msg.foo == INITIAL_VALUE + assert default_msg.baz == INITIAL_VALUE + assert not default_msg.bar + + +def test_oneof_message_setattr_conflict(default_msg, foo_msg): + with pytest.raises(ValueError): + foo_msg.bar = INITIAL_VALUE + assert foo_msg.foo == INITIAL_VALUE + assert not foo_msg.bar + + default_msg.bar = INITIAL_VALUE + with pytest.raises(ValueError): + default_msg.foo = INITIAL_VALUE + assert default_msg.bar == INITIAL_VALUE + assert not default_msg.foo + + +def test_oneof_message_setattr_oneof_same_oneof_field(default_msg, foo_msg): + foo_msg.foo = FINAL_VALUE + assert foo_msg.foo == FINAL_VALUE + assert not foo_msg.bar + + default_msg.bar = INITIAL_VALUE + default_msg.bar = FINAL_VALUE + assert default_msg.bar == FINAL_VALUE + assert not default_msg.foo + + +def test_oneof_message_setattr_oneof_delattr(foo_msg): + del foo_msg.foo + foo_msg.bar = INITIAL_VALUE + assert foo_msg.bar == INITIAL_VALUE + assert not foo_msg.foo + + +def test_oneof_message_init_oneof_conflict(foo_msg): + with pytest.raises(ValueError): + my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE, bar=INITIAL_VALUE) + + with pytest.raises(ValueError): + my_oneof_message.MyOneofMessage( + { + "foo": INITIAL_VALUE, + "bar": INITIAL_VALUE, + } + ) + + with pytest.raises(ValueError): + my_oneof_message.MyOneofMessage(foo_msg._pb, bar=INITIAL_VALUE) + + with pytest.raises(ValueError): + my_oneof_message.MyOneofMessage(foo_msg, bar=INITIAL_VALUE) + + +def test_oneof_message_init_oneof_no_conflict(foo_msg): + msg = my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE, baz=INITIAL_VALUE) + assert msg.foo == INITIAL_VALUE + assert msg.baz == INITIAL_VALUE + assert not msg.bar + + msg = my_oneof_message.MyOneofMessage( + { + "foo": INITIAL_VALUE, + "baz": INITIAL_VALUE, + } + ) + assert msg.foo == INITIAL_VALUE + assert msg.baz == INITIAL_VALUE + assert not msg.bar + + msg = my_oneof_message.MyOneofMessage(foo_msg, baz=INITIAL_VALUE) + assert msg.foo == INITIAL_VALUE + assert msg.baz == INITIAL_VALUE + assert not msg.bar + + msg = my_oneof_message.MyOneofMessage(foo_msg._pb, baz=INITIAL_VALUE) + assert msg.foo == INITIAL_VALUE + assert msg.baz == INITIAL_VALUE + assert not msg.bar + + +def test_oneof_message_init_kwargs_override_same_field_oneof(foo_msg): + # Kwargs take precedence over mapping, and this should be OK + msg = my_oneof_message.MyOneofMessage( + { + "foo": INITIAL_VALUE, + }, + foo=FINAL_VALUE, + ) + assert msg.foo == FINAL_VALUE + + msg = my_oneof_message.MyOneofMessage(foo_msg, foo=FINAL_VALUE) + assert msg.foo == FINAL_VALUE + + msg = my_oneof_message.MyOneofMessage(foo_msg._pb, foo=FINAL_VALUE) + assert msg.foo == FINAL_VALUE + + +def 
test_gcrule_serialize_deserialize(): + test = GcRule( + intersection=GcRule.Intersection( + rules=[ + GcRule(max_num_versions=1234), + GcRule(max_age=duration_pb2.Duration(seconds=12345)), + ] + ) + ) + assert GcRule.serialize(test) == GCRULE_RAW_PROTO_BYTESTRING + assert GcRule.deserialize(GCRULE_RAW_PROTO_BYTESTRING) == test diff --git a/tests/unit/admin_overlay/test_restore_table.py b/tests/unit/admin_overlay/test_restore_table.py new file mode 100644 index 000000000..23c6609e4 --- /dev/null +++ b/tests/unit/admin_overlay/test_restore_table.py @@ -0,0 +1,230 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock + +from google.longrunning import operations_pb2 +from google.rpc import status_pb2, code_pb2 + +from google.api_core import operation, exceptions +from google.api_core.operations_v1 import operations_client +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin, table +from google.cloud.bigtable_admin_v2.overlay.types import restore_table + +import pytest + + +# Set up the mock operations +DEFAULT_MAX_POLL = 3 +RESTORE_TABLE_OPERATION_TABLE_NAME = "Test Table" +RESTORE_TABLE_OPERATION_NAME = "test/restore_table" +RESTORE_TABLE_OPERATION_METADATA = bigtable_table_admin.RestoreTableMetadata( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) +OPTIMIZE_RESTORED_TABLE_OPERATION_NAME = "test/optimize_restore_table" +OPTIMIZE_RESTORED_TABLE_METADATA = bigtable_table_admin.OptimizeRestoredTableMetadata( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) + +OPTIMIZE_RESTORED_TABLE_OPERATION_ID = "abcdefg" +RESTORE_TABLE_OPERATION_FINISHED_RESPONSE = table.Table( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) +RESTORE_TABLE_OPERATION_FINISHED_ERROR = status_pb2.Status( + code=code_pb2.DEADLINE_EXCEEDED, message="Deadline Exceeded" +) + + +def make_operation_proto( + name, done=False, metadata=None, response=None, error=None, **kwargs +): + operation_proto = operations_pb2.Operation(name=name, done=done, **kwargs) + + if metadata is not None: + operation_proto.metadata.Pack(metadata._pb) + + if response is not None: + operation_proto.response.Pack(response._pb) + + if error is not None: + operation_proto.error.CopyFrom(error) + + return operation_proto + + +RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=False, + metadata=RESTORE_TABLE_OPERATION_METADATA, +) + +OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO = make_operation_proto( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_NAME, + metadata=OPTIMIZE_RESTORED_TABLE_METADATA, +) + + +# Set up the mock operation client +def mock_restore_table_operation( + max_poll_count=DEFAULT_MAX_POLL, fail=False, has_optimize_operation=True +): + client = mock.Mock(spec=operations_client.OperationsClient) + + # Set up the polling + side_effect = [RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO] * (max_poll_count - 1) + 
finished_operation_metadata = bigtable_table_admin.RestoreTableMetadata() + bigtable_table_admin.RestoreTableMetadata.copy_from( + finished_operation_metadata, RESTORE_TABLE_OPERATION_METADATA + ) + if has_optimize_operation: + finished_operation_metadata.optimize_table_operation_name = ( + OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + + if fail: + final_operation_proto = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=True, + metadata=finished_operation_metadata, + error=RESTORE_TABLE_OPERATION_FINISHED_ERROR, + ) + else: + final_operation_proto = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=True, + metadata=finished_operation_metadata, + response=RESTORE_TABLE_OPERATION_FINISHED_RESPONSE, + ) + side_effect.append(final_operation_proto) + refresh = mock.Mock(spec=["__call__"], side_effect=side_effect) + cancel = mock.Mock(spec=["__call__"]) + future = operation.Operation( + RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO, + refresh, + cancel, + result_type=table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, + ) + + # Set up the optimize_restore_table_operation + client.get_operation.side_effect = [OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO] + + return restore_table.RestoreTableOperation(client, future) + + +def test_restore_table_operation_client_success_has_optimize(): + restore_table_operation = mock_restore_table_operation() + + restore_table_operation.result() + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation.Operation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + +def test_restore_table_operation_client_success_has_optimize_multiple_calls(): + restore_table_operation = mock_restore_table_operation() + + restore_table_operation.result() + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation.Operation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + restore_table_operation.optimize_restored_table_operation() + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + +def test_restore_table_operation_success_has_optimize_call_before_done(): + restore_table_operation = mock_restore_table_operation() + + with pytest.raises(exceptions.GoogleAPIError): + restore_table_operation.optimize_restored_table_operation() + + restore_table_operation._operations_client.get_operation.assert_not_called() + + +def test_restore_table_operation_client_success_only_cache_after_finishing(): + restore_table_operation = mock_restore_table_operation() + + with pytest.raises(exceptions.GoogleAPIError): + restore_table_operation.optimize_restored_table_operation() + + restore_table_operation.result() + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert 
isinstance(optimize_restored_table_operation, operation.Operation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + restore_table_operation.optimize_restored_table_operation() + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + +def test_restore_table_operation_success_no_optimize(): + restore_table_operation = mock_restore_table_operation(has_optimize_operation=False) + + restore_table_operation.result() + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert optimize_restored_table_operation is None + restore_table_operation._operations_client.get_operation.assert_not_called() + + +def test_restore_table_operation_exception(): + restore_table_operation = mock_restore_table_operation( + fail=True, has_optimize_operation=False + ) + + with pytest.raises(exceptions.GoogleAPICallError): + restore_table_operation.result() + + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert optimize_restored_table_operation is None + restore_table_operation._operations_client.get_operation.assert_not_called() diff --git a/setup.cfg b/tests/unit/data/__init__.py similarity index 82% rename from setup.cfg rename to tests/unit/data/__init__.py index 052350089..89a37dc92 100644 --- a/setup.cfg +++ b/tests/unit/data/__init__.py @@ -1,19 +1,15 @@ # -*- coding: utf-8 -*- -# # Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -# Generated by synthtool. DO NOT EDIT! -[bdist_wheel] -universal = 1 +# diff --git a/tests/unit/data/_async/__init__.py b/tests/unit/data/_async/__init__.py new file mode 100644 index 000000000..6d5e14bcf --- /dev/null +++ b/tests/unit/data/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
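The restore-table tests above (async and sync) pin down a two-phase long-running flow: the restore operation completes first, and its finished metadata may name a follow-up optimize operation that can be fetched and polled separately. For orientation, a minimal caller-side sketch of that flow, assuming a configured overlay admin client; the project/instance/table values here are illustrative, not taken from the tests:

```python
from google.auth.credentials import AnonymousCredentials
from google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client import (
    BigtableTableAdminClient,
)

client = BigtableTableAdminClient(credentials=AnonymousCredentials())

# Phase 1: start the restore and block until the restore LRO finishes.
restore_operation = client.restore_table(
    request={"parent": "projects/p/instances/i", "table_id": "my_table"}
)
restored_table = restore_operation.result()

# Phase 2: the finished metadata may reference a follow-up optimize LRO.
# Per the tests, this returns None when no optimization was scheduled and
# raises GoogleAPIError if called before the restore itself has completed.
optimize_operation = restore_operation.optimize_restored_table_operation()
if optimize_operation is not None:
    optimize_operation.result()  # block until optimization completes
```
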
diff --git a/tests/unit/data/_async/test__mutate_rows.py b/tests/unit/data/_async/test__mutate_rows.py new file mode 100644 index 000000000..f14fa6dee --- /dev/null +++ b/tests/unit/data/_async/test__mutate_rows.py @@ -0,0 +1,364 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from google.cloud.bigtable_v2.types import MutateRowsResponse +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import DeleteAllFromRow +from google.rpc import status_pb2 +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import Forbidden + +from google.cloud.bigtable.data._cross_sync import CrossSync + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test__mutate_rows" + + +@CrossSync.convert_class("TestMutateRowsOperation") +class TestMutateRowsOperationAsync: + def _target_class(self): + return CrossSync._MutateRowsOperation + + def _make_one(self, *args, **kwargs): + if not args: + fake_target = CrossSync.Mock() + fake_target._request_path = {"table_name": "table"} + fake_target.app_profile_id = None + kwargs["gapic_client"] = kwargs.pop("gapic_client", mock.Mock()) + kwargs["target"] = kwargs.pop("target", fake_target) + kwargs["operation_timeout"] = kwargs.pop("operation_timeout", 5) + kwargs["attempt_timeout"] = kwargs.pop("attempt_timeout", 0.1) + kwargs["retryable_exceptions"] = kwargs.pop("retryable_exceptions", ()) + kwargs["mutation_entries"] = kwargs.pop("mutation_entries", []) + return self._target_class()(*args, **kwargs) + + def _make_mutation(self, count=1, size=1): + mutation = RowMutationEntry("k", [DeleteAllFromRow() for _ in range(count)]) + mutation.size = lambda: size + return mutation + + @CrossSync.convert + async def _mock_stream(self, mutation_list, error_dict): + for idx, entry in enumerate(mutation_list): + code = error_dict.get(idx, 0) + yield MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=idx, status=status_pb2.Status(code=code) + ) + ] + ) + + def _make_mock_gapic(self, mutation_list, error_dict=None): + mock_fn = CrossSync.Mock() + if error_dict is None: + error_dict = {} + mock_fn.side_effect = lambda *args, **kwargs: self._mock_stream( + mutation_list, error_dict + ) + return mock_fn + + def test_ctor(self): + """ + test that constructor sets all the attributes correctly + """ + from google.cloud.bigtable.data._async._mutate_rows import _EntryWithProto + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + from google.api_core.exceptions import DeadlineExceeded + from google.api_core.exceptions import Aborted + + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + attempt_timeout = 0.01 + retryable_exceptions = () + instance = self._make_one( + client, + 
table,
+            entries,
+            operation_timeout,
+            attempt_timeout,
+            retryable_exceptions,
+        )
+        # running gapic_fn should trigger a client call with baked-in args
+        assert client.mutate_rows.call_count == 0
+        instance._gapic_fn()
+        assert client.mutate_rows.call_count == 1
+        # entries should be passed down
+        entries_w_pb = [_EntryWithProto(e, e._to_pb()) for e in entries]
+        assert instance.mutations == entries_w_pb
+        # timeout_gen should generate per-attempt timeout
+        assert next(instance.timeout_generator) == attempt_timeout
+        # ensure predicate is set
+        assert instance.is_retryable is not None
+        assert instance.is_retryable(DeadlineExceeded("")) is False
+        assert instance.is_retryable(Aborted("")) is False
+        assert instance.is_retryable(_MutateRowsIncomplete("")) is True
+        assert instance.is_retryable(RuntimeError("")) is False
+        assert instance.remaining_indices == list(range(len(entries)))
+        assert instance.errors == {}
+
+    def test_ctor_too_many_entries(self):
+        """
+        should raise an error if an operation is created with more than 100,000 entries
+        """
+        from google.cloud.bigtable.data._async._mutate_rows import (
+            _MUTATE_ROWS_REQUEST_MUTATION_LIMIT,
+        )
+
+        assert _MUTATE_ROWS_REQUEST_MUTATION_LIMIT == 100_000
+
+        client = mock.Mock()
+        table = mock.Mock()
+        entries = [self._make_mutation()] * (_MUTATE_ROWS_REQUEST_MUTATION_LIMIT + 1)
+        operation_timeout = 0.05
+        attempt_timeout = 0.01
+        with pytest.raises(ValueError) as e:
+            self._make_one(
+                client,
+                table,
+                entries,
+                operation_timeout,
+                attempt_timeout,
+            )
+        assert "mutate_rows requests can contain at most 100000 mutations" in str(
+            e.value
+        )
+        assert "Found 100001" in str(e.value)
+
+    @CrossSync.pytest
+    async def test_mutate_rows_operation(self):
+        """
+        Test successful case of mutate_rows_operation
+        """
+        client = mock.Mock()
+        table = mock.Mock()
+        entries = [self._make_mutation(), self._make_mutation()]
+        operation_timeout = 0.05
+        cls = self._target_class()
+        with mock.patch(
+            f"{cls.__module__}.{cls.__name__}._run_attempt", CrossSync.Mock()
+        ) as attempt_mock:
+            instance = self._make_one(
+                client, table, entries, operation_timeout, operation_timeout
+            )
+            await instance.start()
+            assert attempt_mock.call_count == 1
+
+    @pytest.mark.parametrize("exc_type", [RuntimeError, ZeroDivisionError, Forbidden])
+    @CrossSync.pytest
+    async def test_mutate_rows_attempt_exception(self, exc_type):
+        """
+        exceptions raised from _run_attempt should be surfaced to the caller as-is
+        """
+        client = CrossSync.Mock()
+        table = mock.Mock()
+        table._request_path = {"table_name": "table"}
+        table.app_profile_id = None
+        entries = [self._make_mutation(), self._make_mutation()]
+        operation_timeout = 0.05
+        expected_exception = exc_type("test")
+        client.mutate_rows.side_effect = expected_exception
+        found_exc = None
+        try:
+            instance = self._make_one(
+                client, table, entries, operation_timeout, operation_timeout
+            )
+            await instance._run_attempt()
+        except Exception as e:
+            found_exc = e
+        assert client.mutate_rows.call_count == 1
+        assert type(found_exc) is exc_type
+        assert found_exc == expected_exception
+        assert len(instance.errors) == 2
+        assert len(instance.remaining_indices) == 0
+
+    @pytest.mark.parametrize("exc_type", [RuntimeError, ZeroDivisionError, Forbidden])
+    @CrossSync.pytest
+    async def test_mutate_rows_exception(self, exc_type):
+        """
+        exceptions raised from retryable should be raised in MutationsExceptionGroup
+        """
+        from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+        from 
google.cloud.bigtable.data.exceptions import FailedMutationEntryError
+
+        client = mock.Mock()
+        table = mock.Mock()
+        entries = [self._make_mutation(), self._make_mutation()]
+        operation_timeout = 0.05
+        expected_cause = exc_type("abort")
+        with mock.patch.object(
+            self._target_class(),
+            "_run_attempt",
+            CrossSync.Mock(),
+        ) as attempt_mock:
+            attempt_mock.side_effect = expected_cause
+            found_exc = None
+            try:
+                instance = self._make_one(
+                    client, table, entries, operation_timeout, operation_timeout
+                )
+                await instance.start()
+            except MutationsExceptionGroup as e:
+                found_exc = e
+            assert attempt_mock.call_count == 1
+            assert len(found_exc.exceptions) == 2
+            assert isinstance(found_exc.exceptions[0], FailedMutationEntryError)
+            assert isinstance(found_exc.exceptions[1], FailedMutationEntryError)
+            assert found_exc.exceptions[0].__cause__ == expected_cause
+            assert found_exc.exceptions[1].__cause__ == expected_cause
+
+    @pytest.mark.parametrize(
+        "exc_type",
+        [DeadlineExceeded, RuntimeError],
+    )
+    @CrossSync.pytest
+    async def test_mutate_rows_exception_retryable_eventually_pass(self, exc_type):
+        """
+        If an attempt fails with a retryable exception but eventually passes, it should not raise an exception
+        """
+
+        client = mock.Mock()
+        table = mock.Mock()
+        entries = [self._make_mutation()]
+        operation_timeout = 1
+        expected_cause = exc_type("retry")
+        num_retries = 2
+        with mock.patch.object(
+            self._target_class(),
+            "_run_attempt",
+            CrossSync.Mock(),
+        ) as attempt_mock:
+            attempt_mock.side_effect = [expected_cause] * num_retries + [None]
+            instance = self._make_one(
+                client,
+                table,
+                entries,
+                operation_timeout,
+                operation_timeout,
+                retryable_exceptions=(exc_type,),
+            )
+            await instance.start()
+            assert attempt_mock.call_count == num_retries + 1
+
+    @CrossSync.pytest
+    async def test_mutate_rows_incomplete_ignored(self):
+        """
+        MutateRowsIncomplete exceptions should not be added to error list
+        """
+        from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete
+        from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup
+        from google.api_core.exceptions import DeadlineExceeded
+
+        client = mock.Mock()
+        table = mock.Mock()
+        entries = [self._make_mutation()]
+        operation_timeout = 0.05
+        with mock.patch.object(
+            self._target_class(),
+            "_run_attempt",
+            CrossSync.Mock(),
+        ) as attempt_mock:
+            attempt_mock.side_effect = _MutateRowsIncomplete("ignored")
+            found_exc = None
+            try:
+                instance = self._make_one(
+                    client, table, entries, operation_timeout, operation_timeout
+                )
+                await instance.start()
+            except MutationsExceptionGroup as e:
+                found_exc = e
+            assert attempt_mock.call_count > 0
+            assert len(found_exc.exceptions) == 1
+            assert isinstance(found_exc.exceptions[0].__cause__, DeadlineExceeded)
+
+    @CrossSync.pytest
+    async def test_run_attempt_single_entry_success(self):
+        """Test mutating a single entry"""
+        mutation = self._make_mutation()
+        expected_timeout = 1.3
+        mock_gapic_fn = self._make_mock_gapic({0: mutation})
+        instance = self._make_one(
+            mutation_entries=[mutation],
+            attempt_timeout=expected_timeout,
+        )
+        with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn):
+            await instance._run_attempt()
+        assert len(instance.remaining_indices) == 0
+        assert mock_gapic_fn.call_count == 1
+        _, kwargs = mock_gapic_fn.call_args
+        assert kwargs["timeout"] == expected_timeout
+        request = kwargs["request"]
+        assert request.entries == [mutation._to_pb()]
+
+    @CrossSync.pytest
+    async def test_run_attempt_empty_request(self):
+        """Calling with no mutations 
should result in no API calls""" + mock_gapic_fn = self._make_mock_gapic([]) + instance = self._make_one( + mutation_entries=[], + ) + await instance._run_attempt() + assert mock_gapic_fn.call_count == 0 + + @CrossSync.pytest + async def test_run_attempt_partial_success_retryable(self): + """Some entries succeed, but one fails. Should report the proper index, and raise incomplete exception""" + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + + success_mutation = self._make_mutation() + success_mutation_2 = self._make_mutation() + failure_mutation = self._make_mutation() + mutations = [success_mutation, failure_mutation, success_mutation_2] + mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) + instance = self._make_one( + mutation_entries=mutations, + ) + instance.is_retryable = lambda x: True + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + with pytest.raises(_MutateRowsIncomplete): + await instance._run_attempt() + assert instance.remaining_indices == [1] + assert 0 not in instance.errors + assert len(instance.errors[1]) == 1 + assert instance.errors[1][0].grpc_status_code == 300 + assert 2 not in instance.errors + + @CrossSync.pytest + async def test_run_attempt_partial_success_non_retryable(self): + """Some entries succeed, but one fails. Exception marked as non-retryable. Do not raise incomplete error""" + success_mutation = self._make_mutation() + success_mutation_2 = self._make_mutation() + failure_mutation = self._make_mutation() + mutations = [success_mutation, failure_mutation, success_mutation_2] + mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) + instance = self._make_one( + mutation_entries=mutations, + ) + instance.is_retryable = lambda x: False + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + await instance._run_attempt() + assert instance.remaining_indices == [] + assert 0 not in instance.errors + assert len(instance.errors[1]) == 1 + assert instance.errors[1][0].grpc_status_code == 300 + assert 2 not in instance.errors diff --git a/tests/unit/data/_async/test__read_rows.py b/tests/unit/data/_async/test__read_rows.py new file mode 100644 index 000000000..c43f46d5a --- /dev/null +++ b/tests/unit/data/_async/test__read_rows.py @@ -0,0 +1,388 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
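The read-rows tests that follow exercise `_revise_request_rowset`, which narrows a retried request after a partial stream: row keys at or below the last yielded key are dropped, and row ranges are clipped to start (open) at that key. As a rough pure-Python illustration of the key-pruning invariant the parametrized cases below encode (a sketch of the expected behavior only, not the library's implementation):

```python
def revise_rowset_keys(row_keys, last_key):
    """Drop keys already covered by the previous attempt.

    The relative order of surviving keys is preserved, matching the
    expectations in test_revise_request_rowset_keys_with_range.
    """
    return [key for key in row_keys if key > last_key]


# Mirrors two of the parametrized cases below:
assert revise_rowset_keys([b"a", b"b", b"c"], b"b") == [b"c"]
assert revise_rowset_keys([b"d", b"c", b"b", b"a"], b"b") == [b"d", b"c"]
```
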
+
+import pytest
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+# try/except added for compatibility with python < 3.8
+try:
+    from unittest import mock
+except ImportError:  # pragma: NO COVER
+    import mock  # type: ignore
+
+
+__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test__read_rows"
+
+
+@CrossSync.convert_class(
+    sync_name="TestReadRowsOperation",
+)
+class TestReadRowsOperationAsync:
+    """
+    Tests helper functions in the ReadRowsOperation class.
+    In-depth merging logic in merge_row_response_stream and _read_rows_retryable_attempt
+    is tested in test_read_rows_acceptance, test_client_read_rows, and conformance tests.
+    """
+
+    @staticmethod
+    @CrossSync.convert
+    def _get_target_class():
+        return CrossSync._ReadRowsOperation
+
+    def _make_one(self, *args, **kwargs):
+        return self._get_target_class()(*args, **kwargs)
+
+    def test_ctor(self):
+        from google.cloud.bigtable.data import ReadRowsQuery
+
+        row_limit = 91
+        query = ReadRowsQuery(limit=row_limit)
+        client = mock.Mock()
+        client.read_rows = mock.Mock()
+        client.read_rows.return_value = None
+        table = mock.Mock()
+        table._client = client
+        table._request_path = {"table_name": "test_table"}
+        table.app_profile_id = "test_profile"
+        expected_operation_timeout = 42
+        expected_request_timeout = 44
+        time_gen_mock = mock.Mock()
+        subpath = "_async" if CrossSync.is_async else "_sync_autogen"
+        with mock.patch(
+            f"google.cloud.bigtable.data.{subpath}._read_rows._attempt_timeout_generator",
+            time_gen_mock,
+        ):
+            instance = self._make_one(
+                query,
+                table,
+                operation_timeout=expected_operation_timeout,
+                attempt_timeout=expected_request_timeout,
+            )
+        assert time_gen_mock.call_count == 1
+        time_gen_mock.assert_called_once_with(
+            expected_request_timeout, expected_operation_timeout
+        )
+        assert instance._last_yielded_row_key is None
+        assert instance._remaining_count == row_limit
+        assert instance.operation_timeout == expected_operation_timeout
+        assert client.read_rows.call_count == 0
+        assert instance.request.table_name == "test_table"
+        assert instance.request.app_profile_id == table.app_profile_id
+        assert instance.request.rows_limit == row_limit
+
+    @pytest.mark.parametrize(
+        "in_keys,last_key,expected",
+        [
+            (["b", "c", "d"], "a", ["b", "c", "d"]),
+            (["a", "b", "c"], "b", ["c"]),
+            (["a", "b", "c"], "c", []),
+            (["a", "b", "c"], "d", []),
+            (["d", "c", "b", "a"], "b", ["d", "c"]),
+        ],
+    )
+    @pytest.mark.parametrize("with_range", [True, False])
+    def test_revise_request_rowset_keys_with_range(
+        self, in_keys, last_key, expected, with_range
+    ):
+        from google.cloud.bigtable_v2.types import RowSet as RowSetPB
+        from google.cloud.bigtable_v2.types import RowRange as RowRangePB
+        from google.cloud.bigtable.data.exceptions import _RowSetComplete
+
+        in_keys = [key.encode("utf-8") for key in in_keys]
+        expected = [key.encode("utf-8") for key in expected]
+        last_key = last_key.encode("utf-8")
+
+        if with_range:
+            sample_range = [RowRangePB(start_key_open=last_key)]
+        else:
+            sample_range = []
+        row_set = RowSetPB(row_keys=in_keys, row_ranges=sample_range)
+        if not with_range and expected == []:
+            # expect exception if we are revising to an empty rowset
+            with pytest.raises(_RowSetComplete):
+                self._get_target_class()._revise_request_rowset(row_set, last_key)
+        else:
+            revised = self._get_target_class()._revise_request_rowset(row_set, last_key)
+            assert revised.row_keys == expected
+            assert revised.row_ranges == sample_range
+
+    @pytest.mark.parametrize(
+        "in_ranges,last_key,expected",
+        [
+            (
+                
[{"start_key_open": "b", "end_key_closed": "d"}], + "a", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "a", + [{"start_key_closed": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_open": "a", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "a", "end_key_open": "d"}], + "b", + [{"start_key_open": "b", "end_key_open": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_open": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "e", []), + ([{"start_key_closed": "b"}], "z", [{"start_key_open": "z"}]), + ([{"start_key_closed": "b"}], "a", [{"start_key_closed": "b"}]), + ( + [{"end_key_closed": "z"}], + "a", + [{"start_key_open": "a", "end_key_closed": "z"}], + ), + ( + [{"end_key_open": "z"}], + "a", + [{"start_key_open": "a", "end_key_open": "z"}], + ), + ], + ) + @pytest.mark.parametrize("with_key", [True, False]) + def test_revise_request_rowset_ranges( + self, in_ranges, last_key, expected, with_key + ): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + from google.cloud.bigtable.data.exceptions import _RowSetComplete + + # convert to protobuf + next_key = (last_key + "a").encode("utf-8") + last_key = last_key.encode("utf-8") + in_ranges = [ + RowRangePB(**{k: v.encode("utf-8") for k, v in r.items()}) + for r in in_ranges + ] + expected = [ + RowRangePB(**{k: v.encode("utf-8") for k, v in r.items()}) for r in expected + ] + + if with_key: + row_keys = [next_key] + else: + row_keys = [] + + row_set = RowSetPB(row_ranges=in_ranges, row_keys=row_keys) + if not with_key and expected == []: + # expect exception if we are revising to an empty rowset + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, last_key) + else: + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == row_keys + assert revised.row_ranges == expected + + @pytest.mark.parametrize("last_key", ["a", "b", "c"]) + def test_revise_request_full_table(self, last_key): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + # convert to protobuf + last_key = last_key.encode("utf-8") + row_set = RowSetPB() + for selected_set in [row_set, None]: + revised = self._get_target_class()._revise_request_rowset( + selected_set, last_key + ) + assert revised.row_keys == [] + assert len(revised.row_ranges) == 1 + assert revised.row_ranges[0] == RowRangePB(start_key_open=last_key) + + def test_revise_to_empty_rowset(self): + """revising to an empty rowset should raise error""" + from google.cloud.bigtable.data.exceptions import _RowSetComplete + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + row_keys = [b"a", b"b", b"c"] + row_range = RowRangePB(end_key_open=b"c") + row_set = RowSetPB(row_keys=row_keys, row_ranges=[row_range]) + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, b"d") + + @pytest.mark.parametrize( + "start_limit,emit_num,expected_limit", + [ + (10, 0, 10), + (10, 1, 9), + (10, 10, 0), + (None, 10, 
None),
+            (None, 0, None),
+            (4, 2, 2),
+        ],
+    )
+    @CrossSync.pytest
+    async def test_revise_limit(self, start_limit, emit_num, expected_limit):
+        """
+        revise_limit should revise the request's limit field
+        - if limit is 0 (unlimited), it should never be revised
+        - if start_limit-emit_num == 0, the request should end early
+        - if the number emitted exceeds the new limit, an exception
+          should be raised (tested in test_revise_limit_over_limit)
+        """
+        from google.cloud.bigtable.data import ReadRowsQuery
+        from google.cloud.bigtable_v2.types import ReadRowsResponse
+
+        async def awaitable_stream():
+            async def mock_stream():
+                for i in range(emit_num):
+                    yield ReadRowsResponse(
+                        chunks=[
+                            ReadRowsResponse.CellChunk(
+                                row_key=str(i).encode(),
+                                family_name="b",
+                                qualifier=b"c",
+                                value=b"d",
+                                commit_row=True,
+                            )
+                        ]
+                    )
+
+            return mock_stream()
+
+        query = ReadRowsQuery(limit=start_limit)
+        table = mock.Mock()
+        table._request_path = {"table_name": "table_name"}
+        table.app_profile_id = "app_profile_id"
+        instance = self._make_one(query, table, 10, 10)
+        assert instance._remaining_count == start_limit
+        # read emit_num rows
+        async for val in instance.chunk_stream(awaitable_stream()):
+            pass
+        assert instance._remaining_count == expected_limit
+
+    @pytest.mark.parametrize("start_limit,emit_num", [(5, 10), (3, 9), (1, 10)])
+    @CrossSync.pytest
+    async def test_revise_limit_over_limit(self, start_limit, emit_num):
+        """
+        Should raise InvalidChunk if we get into a state where emit_num > start_limit
+        (unless start_limit == 0, which represents unlimited)
+        """
+        from google.cloud.bigtable.data import ReadRowsQuery
+        from google.cloud.bigtable_v2.types import ReadRowsResponse
+        from google.cloud.bigtable.data.exceptions import InvalidChunk
+
+        async def awaitable_stream():
+            async def mock_stream():
+                for i in range(emit_num):
+                    yield ReadRowsResponse(
+                        chunks=[
+                            ReadRowsResponse.CellChunk(
+                                row_key=str(i).encode(),
+                                family_name="b",
+                                qualifier=b"c",
+                                value=b"d",
+                                commit_row=True,
+                            )
+                        ]
+                    )
+
+            return mock_stream()
+
+        query = ReadRowsQuery(limit=start_limit)
+        table = mock.Mock()
+        table._request_path = {"table_name": "table_name"}
+        table.app_profile_id = "app_profile_id"
+        instance = self._make_one(query, table, 10, 10)
+        assert instance._remaining_count == start_limit
+        with pytest.raises(InvalidChunk) as e:
+            # read emit_num rows
+            async for val in instance.chunk_stream(awaitable_stream()):
+                pass
+        assert "emit count exceeds row limit" in str(e.value)
+
+    @CrossSync.pytest
+    @CrossSync.convert(
+        sync_name="test_close",
+        replace_symbols={"aclose": "close", "__anext__": "__next__"},
+    )
+    async def test_aclose(self):
+        """
+        should be able to close a stream safely with aclose. 
+ Closed generators should raise StopAsyncIteration on next yield + """ + + async def mock_stream(): + while True: + yield 1 + + with mock.patch.object( + self._get_target_class(), "_read_rows_attempt" + ) as mock_attempt: + instance = self._make_one(mock.Mock(), mock.Mock(), 1, 1) + wrapped_gen = mock_stream() + mock_attempt.return_value = wrapped_gen + gen = instance.start_operation() + # read one row + await gen.__anext__() + await gen.aclose() + with pytest.raises(CrossSync.StopIteration): + await gen.__anext__() + # try calling a second time + await gen.aclose() + # ensure close was propagated to wrapped generator + with pytest.raises(CrossSync.StopIteration): + await wrapped_gen.__anext__() + + @CrossSync.pytest + @CrossSync.convert(replace_symbols={"__anext__": "__next__"}) + async def test_retryable_ignore_repeated_rows(self): + """ + Duplicate rows should cause an invalid chunk error + """ + from google.cloud.bigtable.data.exceptions import InvalidChunk + from google.cloud.bigtable_v2.types import ReadRowsResponse + + row_key = b"duplicate" + + async def mock_awaitable_stream(): + async def mock_stream(): + while True: + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk(row_key=row_key, commit_row=True) + ] + ) + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk(row_key=row_key, commit_row=True) + ] + ) + + return mock_stream() + + instance = mock.Mock() + instance._last_yielded_row_key = None + instance._remaining_count = None + stream = self._get_target_class().chunk_stream( + instance, mock_awaitable_stream() + ) + await stream.__anext__() + with pytest.raises(InvalidChunk) as exc: + await stream.__anext__() + assert "row keys should be strictly increasing" in str(exc.value) diff --git a/tests/unit/data/_async/test__swappable_channel.py b/tests/unit/data/_async/test__swappable_channel.py new file mode 100644 index 000000000..14fef2c85 --- /dev/null +++ b/tests/unit/data/_async/test__swappable_channel.py @@ -0,0 +1,135 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
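The swappable-channel tests that follow treat the class under test as a thin delegating wrapper: it builds its initial channel from a zero-argument factory, forwards the grpc channel surface to whichever inner channel is currently installed, and lets refresh logic atomically swap in a replacement. A hedged sketch of that delegation pattern (illustrative only; the real classes live in `_async/_swappable_channel.py` and its generated sync counterpart, and forward each method explicitly rather than via `__getattr__`):

```python
class SwappableChannelSketch:
    """Minimal stand-in showing the delegation the tests assert on."""

    def __init__(self, channel_fn):
        # zero-argument factory; called once at construction time
        self._channel_fn = channel_fn
        self._channel = channel_fn()

    def create_channel(self):
        # build a fresh channel from the stored factory without installing it
        return self._channel_fn()

    def swap_channel(self, new_channel):
        # install the replacement and hand the old channel back to the caller
        old_channel, self._channel = self._channel, new_channel
        return old_channel

    def __getattr__(self, name):
        # unary_unary, get_state, channel_ready, etc. fall through to the
        # currently installed channel
        return getattr(self._channel, name)
```
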
+ +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + +import pytest +from grpc import ChannelConnectivity + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if CrossSync.is_async: + from google.cloud.bigtable.data._async._swappable_channel import ( + AsyncSwappableChannel as TargetType, + ) +else: + from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( + SwappableChannel as TargetType, + ) + + +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test__swappable_channel" + + +@CrossSync.convert_class(sync_name="TestSwappableChannel") +class TestAsyncSwappableChannel: + @staticmethod + @CrossSync.convert + def _get_target_class(): + return TargetType + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + assert instance._channel_fn == channel_fn + channel_fn.assert_called_once_with() + assert instance._channel == channel_fn.return_value + + def test_swap_channel(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + old_channel = instance._channel + new_channel = object() + result = instance.swap_channel(new_channel) + assert result == old_channel + assert instance._channel == new_channel + + def test_create_channel(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + # reset mock from ctor call + channel_fn.reset_mock() + new_channel = instance.create_channel() + channel_fn.assert_called_once_with() + assert new_channel == channel_fn.return_value + + @CrossSync.drop + def test_create_channel_async_interceptors_copied(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + # reset mock from ctor call + channel_fn.reset_mock() + # mock out interceptors on original channel + instance._channel._unary_unary_interceptors = ["unary_unary"] + instance._channel._unary_stream_interceptors = ["unary_stream"] + instance._channel._stream_unary_interceptors = ["stream_unary"] + instance._channel._stream_stream_interceptors = ["stream_stream"] + + new_channel = instance.create_channel() + channel_fn.assert_called_once_with() + assert new_channel == channel_fn.return_value + assert new_channel._unary_unary_interceptors == ["unary_unary"] + assert new_channel._unary_stream_interceptors == ["unary_stream"] + assert new_channel._stream_unary_interceptors == ["stream_unary"] + assert new_channel._stream_stream_interceptors == ["stream_stream"] + + @pytest.mark.parametrize( + "method_name,args,kwargs", + [ + ("unary_unary", (1,), {"kw": 2}), + ("unary_stream", (3,), {"kw": 4}), + ("stream_unary", (5,), {"kw": 6}), + ("stream_stream", (7,), {"kw": 8}), + ("get_state", (), {"try_to_connect": True}), + ], + ) + def test_forwarded_methods(self, method_name, args, kwargs): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + method = getattr(instance, method_name) + result = method(*args, **kwargs) + mock_method = getattr(channel_fn.return_value, method_name) + mock_method.assert_called_once_with(*args, **kwargs) + assert result == mock_method.return_value + + @pytest.mark.parametrize( + "method_name,args,kwargs", + [ + ("channel_ready", (), {}), + ("wait_for_state_change", (ChannelConnectivity.READY,), {}), + ], + ) + @CrossSync.pytest + async def test_forwarded_async_methods(self, method_name, args, kwargs): + async def dummy_coro(*a, **k): + return 
mock.sentinel.result + + channel = mock.Mock() + mock_method = getattr(channel, method_name) + mock_method.side_effect = dummy_coro + + channel_fn = mock.Mock(return_value=channel) + instance = self._make_one(channel_fn) + method = getattr(instance, method_name) + result = await method(*args, **kwargs) + + mock_method.assert_called_once_with(*args, **kwargs) + assert result == mock.sentinel.result diff --git a/tests/unit/data/_async/test_client.py b/tests/unit/data/_async/test_client.py new file mode 100644 index 000000000..9f65d120b --- /dev/null +++ b/tests/unit/data/_async/test_client.py @@ -0,0 +1,3644 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import grpc +import asyncio +import re +import sys + +import pytest +import mock + +from google.cloud.bigtable.data import mutations +from google.auth.credentials import AnonymousCredentials +from google.cloud.bigtable_v2.types import ReadRowsResponse +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.api_core import exceptions as core_exceptions +from google.api_core import client_options +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data.mutations import DeleteAllFromRow +from google.cloud.bigtable.data import TABLE_DEFAULT + +from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule +from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse + +from google.cloud.bigtable.data._cross_sync import CrossSync +from tests.unit.data.execute_query.sql_helpers import ( + chunked_responses, + column, + int64_type, + int_val, + metadata, + null_val, + prepare_response, + str_type, + str_val, +) + +if CrossSync.is_async: + from google.api_core import grpc_helpers_async + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async._swappable_channel import ( + AsyncSwappableChannel, + ) + from google.cloud.bigtable.data._async.metrics_interceptor import ( + AsyncBigtableMetricsInterceptor, + ) + + CrossSync.add_mapping("grpc_helpers", grpc_helpers_async) + CrossSync.add_mapping("SwappableChannel", AsyncSwappableChannel) + CrossSync.add_mapping("MetricsInterceptor", AsyncBigtableMetricsInterceptor) +else: + from google.api_core import grpc_helpers + from google.cloud.bigtable.data._sync_autogen.client import Table # noqa: F401 + from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( + SwappableChannel, + ) + from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( + BigtableMetricsInterceptor, + ) + + CrossSync.add_mapping("grpc_helpers", grpc_helpers) + CrossSync.add_mapping("SwappableChannel", SwappableChannel) + CrossSync.add_mapping("MetricsInterceptor", BigtableMetricsInterceptor) + +__CROSS_SYNC_OUTPUT__ = 
"tests.unit.data._sync_autogen.test_client" + + +@CrossSync.convert_class( + sync_name="TestBigtableDataClient", + add_mapping_for_name="TestBigtableDataClient", +) +class TestBigtableDataClientAsync: + @staticmethod + @CrossSync.convert + def _get_target_class(): + return CrossSync.DataClient + + @classmethod + def _make_client(cls, *args, use_emulator=True, **kwargs): + import os + + env_mask = {} + # by default, use emulator mode to avoid auth issues in CI + # emulator mode must be disabled by tests that check channel pooling/refresh background tasks + if use_emulator: + env_mask["BIGTABLE_EMULATOR_HOST"] = "localhost" + import warnings + + warnings.filterwarnings("ignore", category=RuntimeWarning) + else: + # set some default values + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + kwargs["project"] = kwargs.get("project", "project-id") + with mock.patch.dict(os.environ, env_mask): + return cls._get_target_class()(*args, **kwargs) + + @CrossSync.pytest + async def test_ctor(self): + expected_project = "project-id" + expected_credentials = AnonymousCredentials() + client = self._make_client( + project="project-id", + credentials=expected_credentials, + use_emulator=False, + ) + await CrossSync.yield_to_event_loop() + assert client.project == expected_project + assert not client._active_instances + assert client._channel_refresh_task is not None + assert client.transport._credentials == expected_credentials + assert isinstance(client._metrics_interceptor, CrossSync.MetricsInterceptor) + await client.close() + + @CrossSync.pytest + async def test_ctor_super_inits(self): + from google.cloud.client import ClientWithProject + from google.api_core import client_options as client_options_lib + + project = "project-id" + credentials = AnonymousCredentials() + client_options = {"api_endpoint": "foo.bar:1234"} + options_parsed = client_options_lib.from_dict(client_options) + with mock.patch.object( + CrossSync.GapicClient, "__init__" + ) as bigtable_client_init: + bigtable_client_init.return_value = None + with mock.patch.object( + ClientWithProject, "__init__" + ) as client_project_init: + client_project_init.return_value = None + try: + self._make_client( + project=project, + credentials=credentials, + client_options=options_parsed, + use_emulator=False, + ) + except AttributeError: + pass + # test gapic superclass init was called + assert bigtable_client_init.call_count == 1 + kwargs = bigtable_client_init.call_args[1] + assert kwargs["credentials"] == credentials + assert kwargs["client_options"] == options_parsed + # test mixin superclass init was called + assert client_project_init.call_count == 1 + kwargs = client_project_init.call_args[1] + assert kwargs["project"] == project + assert kwargs["credentials"] == credentials + assert kwargs["client_options"] == options_parsed + + @CrossSync.pytest + async def test_ctor_dict_options(self): + from google.api_core.client_options import ClientOptions + + client_options = {"api_endpoint": "foo.bar:1234"} + with mock.patch.object( + CrossSync.GapicClient, "__init__" + ) as bigtable_client_init: + try: + self._make_client(client_options=client_options) + except TypeError: + pass + bigtable_client_init.assert_called_once() + kwargs = bigtable_client_init.call_args[1] + called_options = kwargs["client_options"] + assert called_options.api_endpoint == "foo.bar:1234" + assert isinstance(called_options, ClientOptions) + with mock.patch.object( + self._get_target_class(), "_start_background_channel_refresh" + ) as 
start_background_refresh:
+ client = self._make_client(
+ client_options=client_options, use_emulator=False
+ )
+ start_background_refresh.assert_called_once()
+ await client.close()
+
+ @CrossSync.pytest
+ async def test_veneer_grpc_headers(self):
+ client_component = "data-async" if CrossSync.is_async else "data"
+ VENEER_HEADER_REGEX = re.compile(
+ r"gapic\/[0-9]+\.[\w.-]+ gax\/[0-9]+\.[\w.-]+ gccl\/[0-9]+\.[\w.-]+-"
+ + client_component
+ + r" gl-python\/[0-9]+\.[\w.-]+ grpc\/[0-9]+\.[\w.-]+"
+ )
+
+ # client_info should be populated with headers to
+ # detect as a veneer client
+ if CrossSync.is_async:
+ patch = mock.patch("google.api_core.gapic_v1.method_async.wrap_method")
+ else:
+ patch = mock.patch("google.api_core.gapic_v1.method.wrap_method")
+ with patch as gapic_mock:
+ client = self._make_client(project="project-id")
+ wrapped_call_list = gapic_mock.call_args_list
+ assert len(wrapped_call_list) > 0
+ # each wrapped call should have veneer headers
+ for call in wrapped_call_list:
+ client_info = call.kwargs["client_info"]
+ assert client_info is not None, f"{call} has no client_info"
+ wrapped_user_agent_sorted = " ".join(
+ sorted(client_info.to_user_agent().split(" "))
+ )
+ assert VENEER_HEADER_REGEX.match(
+ wrapped_user_agent_sorted
+ ), f"'{wrapped_user_agent_sorted}' does not match {VENEER_HEADER_REGEX}"
+ await client.close()
+
+ @CrossSync.drop
+ @pytest.mark.filterwarnings("ignore::RuntimeWarning")
+ def test__start_background_channel_refresh_sync(self):
+ # should raise RuntimeError if called in a sync context
+ client = self._make_client(project="project-id", use_emulator=False)
+ with pytest.raises(RuntimeError):
+ client._start_background_channel_refresh()
+
+ @CrossSync.pytest
+ async def test__start_background_channel_refresh_task_exists(self):
+ # if tasks exist, should do nothing
+ client = self._make_client(project="project-id", use_emulator=False)
+ assert client._channel_refresh_task is not None
+ with mock.patch.object(asyncio, "create_task") as create_task:
+ client._start_background_channel_refresh()
+ create_task.assert_not_called()
+ await client.close()
+
+ @CrossSync.pytest
+ async def test__start_background_channel_refresh(self):
+ # should create background tasks for each channel
+ client = self._make_client(project="project-id")
+ with mock.patch.object(
+ client, "_ping_and_warm_instances", CrossSync.Mock()
+ ) as ping_and_warm:
+ client._emulator_host = None
+ client.transport._grpc_channel = CrossSync.SwappableChannel(mock.Mock)
+ client._start_background_channel_refresh()
+ assert client._channel_refresh_task is not None
+ assert isinstance(client._channel_refresh_task, CrossSync.Task)
+ await CrossSync.sleep(0.1)
+ assert ping_and_warm.call_count == 1
+ await client.close()
+
+ @CrossSync.drop
+ @CrossSync.pytest
+ @pytest.mark.skipif(
+ sys.version_info < (3, 8), reason="Task.name requires python3.8 or higher"
+ )
+ async def test__start_background_channel_refresh_task_names(self):
+ # the background refresh task should be given a descriptive name
+ client = self._make_client(project="project-id", use_emulator=False)
+ name = client._channel_refresh_task.get_name()
+ assert "channel refresh" in name
+ await client.close()
+
+ @CrossSync.pytest
+ async def test__ping_and_warm_instances(self):
+ """
+ test ping and warm with mocked asyncio.gather
+ """
+ client_mock = mock.Mock()
+ client_mock._execute_ping_and_warms = (
+ lambda *args: self._get_target_class()._execute_ping_and_warms(
+ client_mock, *args
+ )
+ )
+ with mock.patch.object(
+ CrossSync,
"gather_partials", CrossSync.Mock() + ) as gather: + # gather_partials is expected to call the function passed, and return the result + gather.side_effect = lambda partials, **kwargs: [None for _ in partials] + channel = mock.Mock() + # test with no instances + client_mock._active_instances = [] + result = await self._get_target_class()._ping_and_warm_instances( + client_mock, channel=channel + ) + assert len(result) == 0 + assert gather.call_args[1]["return_exceptions"] is True + assert gather.call_args[1]["sync_executor"] == client_mock._executor + # test with instances + client_mock._active_instances = [(mock.Mock(), mock.Mock())] * 4 + gather.reset_mock() + channel.reset_mock() + result = await self._get_target_class()._ping_and_warm_instances( + client_mock, channel=channel + ) + assert len(result) == 4 + gather.assert_called_once() + # expect one partial for each instance + partial_list = gather.call_args.args[0] + assert len(partial_list) == 4 + if CrossSync.is_async: + gather.assert_awaited_once() + # check grpc call arguments + grpc_call_args = channel.unary_unary().call_args_list + for idx, (_, kwargs) in enumerate(grpc_call_args): + ( + expected_instance, + expected_app_profile, + ) = client_mock._active_instances[idx] + request = kwargs["request"] + assert request["name"] == expected_instance + assert request["app_profile_id"] == expected_app_profile + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert ( + metadata[0][1] + == f"name={expected_instance}&app_profile_id={expected_app_profile}" + ) + + @CrossSync.pytest + async def test__ping_and_warm_single_instance(self): + """ + should be able to call ping and warm with single instance + """ + client_mock = mock.Mock() + client_mock._execute_ping_and_warms = ( + lambda *args: self._get_target_class()._execute_ping_and_warms( + client_mock, *args + ) + ) + with mock.patch.object( + CrossSync, "gather_partials", CrossSync.Mock() + ) as gather: + gather.side_effect = lambda *args, **kwargs: [fn() for fn in args[0]] + # test with large set of instances + client_mock._active_instances = [mock.Mock()] * 100 + test_key = ("test-instance", "test-app-profile") + result = await self._get_target_class()._ping_and_warm_instances( + client_mock, test_key + ) + # should only have been called with test instance + assert len(result) == 1 + # check grpc call arguments + grpc_call_args = ( + client_mock.transport.grpc_channel.unary_unary().call_args_list + ) + assert len(grpc_call_args) == 1 + kwargs = grpc_call_args[0][1] + request = kwargs["request"] + assert request["name"] == "test-instance" + assert request["app_profile_id"] == "test-app-profile" + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert ( + metadata[0][1] == "name=test-instance&app_profile_id=test-app-profile" + ) + + @CrossSync.pytest + @pytest.mark.parametrize( + "refresh_interval, wait_time, expected_sleep", + [ + (0, 0, 0), + (0, 1, 0), + (10, 0, 10), + (10, 5, 5), + (10, 10, 0), + (10, 15, 0), + ], + ) + async def test__manage_channel_first_sleep( + self, refresh_interval, wait_time, expected_sleep + ): + # first sleep time should be `refresh_interval` seconds after client init + import time + + with mock.patch.object(time, "monotonic") as monotonic: + monotonic.return_value = 0 + with mock.patch.object(CrossSync, "event_wait") as sleep: + sleep.side_effect = asyncio.CancelledError + try: + client = self._make_client(project="project-id") + 
client._channel_init_time = -wait_time
+ await client._manage_channel(refresh_interval, refresh_interval)
+ except asyncio.CancelledError:
+ pass
+ sleep.assert_called_once()
+ call_time = sleep.call_args[0][1]
+ assert (
+ abs(call_time - expected_sleep) < 0.1
+ ), f"refresh_interval: {refresh_interval}, wait_time: {wait_time}, expected_sleep: {expected_sleep}"
+ await client.close()
+
+ @CrossSync.pytest
+ async def test__manage_channel_ping_and_warm(self):
+ """
+ _manage channel should call ping and warm internally
+ """
+ import threading
+
+ client = self._make_client(project="project-id", use_emulator=True)
+ orig_channel = client.transport.grpc_channel
+ # should ping and warm all new channels, and old channels if sleeping
+ sleep_tuple = (
+ (asyncio, "sleep") if CrossSync.is_async else (threading.Event, "wait")
+ )
+ with mock.patch.object(*sleep_tuple) as sleep_mock:
+ # stop process after loop
+ sleep_mock.side_effect = [None, asyncio.CancelledError]
+ ping_and_warm = client._ping_and_warm_instances = CrossSync.Mock()
+ # should ping and warm old channel then new if sleep > 0
+ try:
+ await client._manage_channel(10)
+ except asyncio.CancelledError:
+ pass
+ # should have called at loop start, and after replacement
+ assert ping_and_warm.call_count == 2
+ # should have replaced channel once
+ assert client.transport.grpc_channel._channel != orig_channel
+ # make sure new and old channels were warmed
+ called_with = [call[1]["channel"] for call in ping_and_warm.call_args_list]
+ assert orig_channel in called_with
+ assert client.transport.grpc_channel._channel in called_with
+
+ @CrossSync.pytest
+ @pytest.mark.parametrize(
+ "refresh_interval, num_cycles, expected_sleep",
+ [
+ (None, 1, 60 * 35),
+ (10, 10, 100),
+ (10, 1, 10),
+ ],
+ )
+ async def test__manage_channel_sleeps(
+ self, refresh_interval, num_cycles, expected_sleep
+ ):
+ # make sure that sleeps work as expected
+ import time
+ import random
+
+ with mock.patch.object(random, "uniform") as uniform:
+ uniform.side_effect = lambda min_, max_: min_
+ with mock.patch.object(time, "time") as time_mock:
+ time_mock.return_value = 0
+ with mock.patch.object(CrossSync, "event_wait") as sleep:
+ sleep.side_effect = [None for i in range(num_cycles - 1)] + [
+ asyncio.CancelledError
+ ]
+ client = self._make_client(project="project-id", use_emulator=True)
+ with mock.patch.object(
+ client.transport, "create_channel", CrossSync.Mock
+ ):
+ try:
+ if refresh_interval is not None:
+ await client._manage_channel(
+ refresh_interval, refresh_interval, grace_period=0
+ )
+ else:
+ await client._manage_channel(grace_period=0)
+ except asyncio.CancelledError:
+ pass
+ assert sleep.call_count == num_cycles
+ total_sleep = sum([call[0][1] for call in sleep.call_args_list])
+ assert (
+ abs(total_sleep - expected_sleep) < 0.5
+ ), f"refresh_interval={refresh_interval}, num_cycles={num_cycles}, expected_sleep={expected_sleep}"
+ await client.close()
+
+ @CrossSync.pytest
+ async def test__manage_channel_random(self):
+ import random
+
+ with mock.patch.object(CrossSync, "event_wait") as sleep:
+ with mock.patch.object(random, "uniform") as uniform:
+ uniform.return_value = 0
+ try:
+ uniform.side_effect = asyncio.CancelledError
+ client = self._make_client(project="project-id")
+ except asyncio.CancelledError:
+ uniform.side_effect = None
+ uniform.reset_mock()
+ sleep.reset_mock()
+ with mock.patch.object(client.transport, "create_channel"):
+ min_val = 200
+ max_val = 205
+ uniform.side_effect = lambda min_, max_: min_
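+ # first event_wait returns normally; the second cancels to break out of the refresh loop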
sleep.side_effect = [None, asyncio.CancelledError] + try: + await client._manage_channel(min_val, max_val, grace_period=0) + except asyncio.CancelledError: + pass + assert uniform.call_count == 2 + uniform_args = [call[0] for call in uniform.call_args_list] + for found_min, found_max in uniform_args: + assert found_min == min_val + assert found_max == max_val + + @CrossSync.pytest + @pytest.mark.parametrize("num_cycles", [0, 1, 10, 100]) + async def test__manage_channel_refresh(self, num_cycles): + # make sure that channels are properly refreshed + expected_refresh = 0.5 + grpc_lib = grpc.aio if CrossSync.is_async else grpc + new_channel = grpc_lib.insecure_channel("localhost:8080") + create_channel_mock = mock.Mock() + create_channel_mock.return_value = new_channel + refreshable_channel = CrossSync.SwappableChannel(create_channel_mock) + + with mock.patch.object(CrossSync, "event_wait") as sleep: + sleep.side_effect = [None for i in range(num_cycles)] + [RuntimeError] + client = self._make_client(project="project-id") + client.transport._grpc_channel = refreshable_channel + create_channel_mock.reset_mock() + sleep.reset_mock() + try: + await client._manage_channel( + refresh_interval_min=expected_refresh, + refresh_interval_max=expected_refresh, + grace_period=0, + ) + except RuntimeError: + pass + assert sleep.call_count == num_cycles + 1 + assert create_channel_mock.call_count == num_cycles + await client.close() + + @CrossSync.pytest + async def test__register_instance(self): + """ + test instance registration + """ + # set up mock client + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: f"prefix/{b}" + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = None + client_mock._ping_and_warm_instances = CrossSync.Mock() + table_mock = mock.Mock() + await self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) + ) + # first call should start background refresh + assert client_mock._start_background_channel_refresh.call_count == 1 + # ensure active_instances and instance_owners were updated properly + expected_key = ( + "prefix/instance-1", + table_mock.app_profile_id, + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + # simulate creation of refresh task + client_mock._channel_refresh_task = mock.Mock() + # next call should not call _start_background_channel_refresh again + table_mock2 = mock.Mock() + await self._get_target_class()._register_instance( + client_mock, "instance-2", table_mock2.app_profile_id, id(table_mock2) + ) + assert client_mock._start_background_channel_refresh.call_count == 1 + assert ( + client_mock._ping_and_warm_instances.call_args[0][0][0] + == "prefix/instance-2" + ) + # but it should call ping and warm with new instance key + assert client_mock._ping_and_warm_instances.call_count == 1 + # check for updated lists + assert len(active_instances) == 2 + assert len(instance_owners) == 2 + expected_key2 = ( + "prefix/instance-2", + table_mock2.app_profile_id, + ) + assert any( + [ + expected_key2 == tuple(list(active_instances)[i]) + for i in range(len(active_instances)) + ] + ) + assert any( + [ + expected_key2 == tuple(list(instance_owners)[i]) + for i in range(len(instance_owners)) + ] + ) + + 
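For orientation, the registration tests above and below assert a simple ownership contract. Here is a minimal, self-contained sketch of that bookkeeping; the `register`/`unregister` helpers are illustrative stand-ins, not the client's actual private API:

```python
from __future__ import annotations

# Hypothetical model of the bookkeeping asserted by these tests:
# active_instances keeps one entry per unique (instance_path, app_profile_id)
# key, while instance_owners maps each key to the ids of the tables holding it.
active_instances: set[tuple[str, str | None]] = set()
instance_owners: dict[tuple[str, str | None], set[int]] = {}


def register(instance_path: str, app_profile_id: str | None, owner_id: int) -> None:
    key = (instance_path, app_profile_id)
    instance_owners.setdefault(key, set()).add(owner_id)
    active_instances.add(key)  # set semantics make duplicate registration a no-op


def unregister(instance_path: str, app_profile_id: str | None, owner_id: int) -> bool:
    key = (instance_path, app_profile_id)
    if owner_id not in instance_owners.get(key, set()):
        return False  # unknown key or owner: nothing to remove
    instance_owners[key].discard(owner_id)
    if not instance_owners[key]:
        # the instance only goes inactive once its last owner unregisters
        active_instances.discard(key)
    return True


# two tables sharing one key: a single active instance with two owners
register("prefix/instance-1", "profile", owner_id=1)
register("prefix/instance-1", "profile", owner_id=2)
assert len(active_instances) == 1
assert instance_owners[("prefix/instance-1", "profile")] == {1, 2}
assert unregister("prefix/instance-1", "profile", owner_id=1)
assert len(active_instances) == 1  # still held open by owner 2
assert unregister("prefix/instance-1", "profile", owner_id=2)
assert not active_instances
```

Duplicate registrations are absorbed by set semantics, which is why the duplicate-registration test below expects no state change on the second call.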
@CrossSync.pytest + async def test__register_instance_duplicate(self): + """ + test double instance registration. Should be no-op + """ + # set up mock client + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: f"prefix/{b}" + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = object() + mock_channels = [mock.Mock()] + client_mock.transport.channels = mock_channels + client_mock._ping_and_warm_instances = CrossSync.Mock() + table_mock = mock.Mock() + expected_key = ( + "prefix/instance-1", + table_mock.app_profile_id, + ) + # fake first registration + await self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + # should have called ping and warm + assert client_mock._ping_and_warm_instances.call_count == 1 + # next call should do nothing + await self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + assert client_mock._ping_and_warm_instances.call_count == 1 + + @CrossSync.pytest + @pytest.mark.parametrize( + "insert_instances,expected_active,expected_owner_keys", + [ + ([("i", None)], [("i", None)], [("i", None)]), + ([("i", "p")], [("i", "p")], [("i", "p")]), + ([("1", "p"), ("1", "p")], [("1", "p")], [("1", "p")]), + ( + [("1", "p"), ("2", "p")], + [("1", "p"), ("2", "p")], + [("1", "p"), ("2", "p")], + ), + ], + ) + async def test__register_instance_state( + self, insert_instances, expected_active, expected_owner_keys + ): + """ + test that active_instances and instance_owners are updated as expected + """ + # set up mock client + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: b + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = None + client_mock._ping_and_warm_instances = CrossSync.Mock() + table_mock = mock.Mock() + # register instances + for instance, profile in insert_instances: + table_mock.app_profile_id = profile + await self._get_target_class()._register_instance( + client_mock, instance, profile, id(table_mock) + ) + assert len(active_instances) == len(expected_active) + assert len(instance_owners) == len(expected_owner_keys) + for expected in expected_active: + assert any( + [ + expected == tuple(list(active_instances)[i]) + for i in range(len(active_instances)) + ] + ) + for expected in expected_owner_keys: + assert any( + [ + expected == tuple(list(instance_owners)[i]) + for i in range(len(instance_owners)) + ] + ) + + @CrossSync.pytest + async def test__remove_instance_registration(self): + client = self._make_client(project="project-id") + table = mock.Mock() + await client._register_instance("instance-1", table.app_profile_id, id(table)) + await client._register_instance("instance-2", table.app_profile_id, id(table)) + assert len(client._active_instances) == 2 + assert len(client._instance_owners.keys()) == 2 + 
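+ # build the warmed-instance keys used to track ownership of each registration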
instance_1_path = client._gapic_client.instance_path( + client.project, "instance-1" + ) + instance_1_key = (instance_1_path, table.app_profile_id) + instance_2_path = client._gapic_client.instance_path( + client.project, "instance-2" + ) + instance_2_key = (instance_2_path, table.app_profile_id) + assert len(client._instance_owners[instance_1_key]) == 1 + assert list(client._instance_owners[instance_1_key])[0] == id(table) + assert len(client._instance_owners[instance_2_key]) == 1 + assert list(client._instance_owners[instance_2_key])[0] == id(table) + success = client._remove_instance_registration( + "instance-1", table.app_profile_id, id(table) + ) + assert success + assert len(client._active_instances) == 1 + assert len(client._instance_owners[instance_1_key]) == 0 + assert len(client._instance_owners[instance_2_key]) == 1 + assert client._active_instances == {instance_2_key} + success = client._remove_instance_registration("fake-key", "profile", id(table)) + assert not success + assert len(client._active_instances) == 1 + await client.close() + + @CrossSync.pytest + async def test__multiple_table_registration(self): + """ + registering with multiple tables with the same key should + add multiple owners to instance_owners, but only keep one copy + of shared key in active_instances + """ + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + async with self._make_client(project="project-id") as client: + async with client.get_table("instance_1", "table_1") as table_1: + instance_1_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_1_key = _WarmedInstanceKey( + instance_1_path, table_1.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 1 + assert len(client._active_instances) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + # duplicate table should register in instance_owners under same key + async with client.get_table("instance_1", "table_2") as table_2: + assert table_2._register_instance_future is not None + if not CrossSync.is_async: + # give the background task time to run + table_2._register_instance_future.result() + assert len(client._instance_owners[instance_1_key]) == 2 + assert len(client._active_instances) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_1_key] + # unique table should register in instance_owners and active_instances + async with client.get_table( + "instance_1", "table_3", app_profile_id="diff" + ) as table_3: + assert table_3._register_instance_future is not None + if not CrossSync.is_async: + # give the background task time to run + table_3._register_instance_future.result() + instance_3_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_3_key = _WarmedInstanceKey( + instance_3_path, table_3.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 2 + assert len(client._instance_owners[instance_3_key]) == 1 + assert len(client._active_instances) == 2 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_1_key] + assert id(table_3) in client._instance_owners[instance_3_key] + # sub-tables should be unregistered, but instance should still be active + assert len(client._active_instances) == 1 + assert instance_1_key in client._active_instances + assert id(table_2) not in client._instance_owners[instance_1_key] + # both tables are gone. 
instance should be unregistered + assert len(client._active_instances) == 0 + assert instance_1_key not in client._active_instances + assert len(client._instance_owners[instance_1_key]) == 0 + + @CrossSync.pytest + async def test__multiple_instance_registration(self): + """ + registering with multiple instance keys should update the key + in instance_owners and active_instances + """ + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + async with self._make_client(project="project-id") as client: + async with client.get_table("instance_1", "table_1") as table_1: + assert table_1._register_instance_future is not None + if not CrossSync.is_async: + # give the background task time to run + table_1._register_instance_future.result() + async with client.get_table("instance_2", "table_2") as table_2: + assert table_2._register_instance_future is not None + if not CrossSync.is_async: + # give the background task time to run + table_2._register_instance_future.result() + instance_1_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_1_key = _WarmedInstanceKey( + instance_1_path, table_1.app_profile_id + ) + instance_2_path = client._gapic_client.instance_path( + client.project, "instance_2" + ) + instance_2_key = _WarmedInstanceKey( + instance_2_path, table_2.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 1 + assert len(client._instance_owners[instance_2_key]) == 1 + assert len(client._active_instances) == 2 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_2_key] + # instance2 should be unregistered, but instance1 should still be active + assert len(client._active_instances) == 1 + assert instance_1_key in client._active_instances + assert len(client._instance_owners[instance_2_key]) == 0 + assert len(client._instance_owners[instance_1_key]) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + # both tables are gone. 
instances should both be unregistered + assert len(client._active_instances) == 0 + assert len(client._instance_owners[instance_1_key]) == 0 + assert len(client._instance_owners[instance_2_key]) == 0 + + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + @CrossSync.pytest + async def test_get_api_surface(self, method): + """ + test client.get_table and client.get_authorized_view + """ + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + client = self._make_client(project="project-id") + assert not client._active_instances + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + if method == "get_table": + surface = client.get_table( + expected_instance_id, + expected_table_id, + expected_app_profile_id, + ) + assert isinstance(surface, CrossSync.TestTable._get_target_class()) + elif method == "get_authorized_view": + surface = client.get_authorized_view( + expected_instance_id, + expected_table_id, + "view_id", + expected_app_profile_id, + ) + assert isinstance(surface, CrossSync.TestAuthorizedView._get_target_class()) + assert ( + surface.authorized_view_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}/authorizedViews/view_id" + ) + else: + raise TypeError(f"unexpected method: {method}") + await CrossSync.yield_to_event_loop() + assert surface.table_id == expected_table_id + assert ( + surface.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert surface.instance_id == expected_instance_id + assert ( + surface.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert surface.app_profile_id == expected_app_profile_id + assert surface.client is client + instance_key = _WarmedInstanceKey(surface.instance_name, surface.app_profile_id) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(surface)} + await client.close() + + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + @CrossSync.pytest + async def test_api_surface_arg_passthrough(self, method): + """ + All arguments passed in get_table and get_authorized_view should be sent to constructor + """ + if method == "get_table": + surface_type = CrossSync.TestTable._get_target_class() + elif method == "get_authorized_view": + surface_type = CrossSync.TestAuthorizedView._get_target_class() + else: + raise TypeError(f"unexpected method: {method}") + + async with self._make_client(project="project-id") as client: + with mock.patch.object(surface_type, "__init__") as mock_constructor: + mock_constructor.return_value = None + assert not client._active_instances + expected_args = ( + "table", + "instance", + "view", + "app_profile", + 1, + "test", + {"test": 2}, + ) + expected_kwargs = {"hello": "world", "test": 2} + + getattr(client, method)( + *expected_args, + **expected_kwargs, + ) + mock_constructor.assert_called_once_with( + client, + *expected_args, + **expected_kwargs, + ) + + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + @CrossSync.pytest + async def test_api_surface_context_manager(self, method): + """ + get_table and get_authorized_view should work as context managers + """ + from functools import partial + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = 
"app-profile-id" + expected_project_id = "project-id" + + if method == "get_table": + surface_type = CrossSync.TestTable._get_target_class() + elif method == "get_authorized_view": + surface_type = CrossSync.TestAuthorizedView._get_target_class() + else: + raise TypeError(f"unexpected method: {method}") + + with mock.patch.object(surface_type, "close") as close_mock: + async with self._make_client(project=expected_project_id) as client: + if method == "get_table": + fn = partial( + client.get_table, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + ) + elif method == "get_authorized_view": + fn = partial( + client.get_authorized_view, + expected_instance_id, + expected_table_id, + "view_id", + expected_app_profile_id, + ) + else: + raise TypeError(f"unexpected method: {method}") + async with fn() as table: + await CrossSync.yield_to_event_loop() + assert isinstance(table, surface_type) + assert table.table_id == expected_table_id + assert ( + table.table_name + == f"projects/{expected_project_id}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert table.instance_id == expected_instance_id + assert ( + table.instance_name + == f"projects/{expected_project_id}/instances/{expected_instance_id}" + ) + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey( + table.instance_name, table.app_profile_id + ) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + assert close_mock.call_count == 1 + + @CrossSync.pytest + async def test_close(self): + client = self._make_client(project="project-id", use_emulator=False) + task = client._channel_refresh_task + assert task is not None + assert not task.done() + with mock.patch.object( + client.transport, "close", CrossSync.Mock() + ) as close_mock: + await client.close() + close_mock.assert_called_once() + if CrossSync.is_async: + close_mock.assert_awaited() + assert task.done() + assert client._channel_refresh_task is None + + @CrossSync.pytest + async def test_close_with_timeout(self): + expected_timeout = 19 + client = self._make_client(project="project-id", use_emulator=False) + with mock.patch.object(CrossSync, "wait", CrossSync.Mock()) as wait_for_mock: + await client.close(timeout=expected_timeout) + wait_for_mock.assert_called_once() + if CrossSync.is_async: + wait_for_mock.assert_awaited() + assert wait_for_mock.call_args[1]["timeout"] == expected_timeout + await client.close() + + @CrossSync.pytest + async def test_context_manager(self): + from functools import partial + + # context manager should close the client cleanly + close_mock = CrossSync.Mock() + true_close = None + async with self._make_client( + project="project-id", use_emulator=False + ) as client: + # grab reference to close coro for async test + true_close = partial(client.close) + client.close = close_mock + assert not client._channel_refresh_task.done() + assert client.project == "project-id" + assert client._active_instances == set() + close_mock.assert_not_called() + close_mock.assert_called_once() + if CrossSync.is_async: + close_mock.assert_awaited() + # actually close the client + await true_close() + + @CrossSync.drop + def test_client_ctor_sync(self): + # initializing client in a sync context should raise RuntimeError + + with pytest.warns(RuntimeWarning) as warnings: + client = self._make_client(project="project-id", use_emulator=False) + expected_warning = [w for w in warnings if "client.py" in 
w.filename]
+ assert len(expected_warning) == 1
+ assert (
+ "BigtableDataClientAsync should be started in an asyncio event loop."
+ in str(expected_warning[0].message)
+ )
+ assert client.project == "project-id"
+ assert client._channel_refresh_task is None
+
+ @CrossSync.pytest
+ async def test_default_universe_domain(self):
+ """
+ When not passed, universe_domain should default to googleapis.com
+ """
+ async with self._make_client(project="project-id", credentials=None) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ @CrossSync.pytest
+ async def test_custom_universe_domain(self):
+ """test with a customized universe domain value and emulator enabled"""
+ universe_domain = "test-universe.test"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ async with self._make_client(
+ project="project_id",
+ client_options=options,
+ use_emulator=True,
+ credentials=None,
+ ) as client:
+ assert client.universe_domain == universe_domain
+ assert client.api_endpoint == f"bigtable.{universe_domain}"
+
+ @CrossSync.pytest
+ async def test_configured_universe_domain_matches_GDU(self):
+ """Test that a configured universe domain succeeds with matching GDU credentials."""
+ universe_domain = "googleapis.com"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ async with self._make_client(
+ project="project_id", client_options=options, credentials=None
+ ) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ @CrossSync.pytest
+ async def test_credential_universe_domain_matches_GDU(self):
+ """Test that credentials with a matching GDU universe domain succeed"""
+ creds = AnonymousCredentials()
+ creds._universe_domain = "googleapis.com"
+ async with self._make_client(project="project_id", credentials=creds) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ @CrossSync.pytest
+ async def test_anonymous_credential_universe_domain(self):
+ """Anonymous credentials should use the default universe domain"""
+ creds = AnonymousCredentials()
+ async with self._make_client(project="project_id", credentials=creds) as client:
+ assert client.universe_domain == "googleapis.com"
+ assert client.api_endpoint == "bigtable.googleapis.com"
+
+ @CrossSync.pytest
+ async def test_configured_universe_domain_mismatched_credentials(self):
+ """Test that configured universe domain errors with mismatched universe
+ domain credentials.
+ """
+ universe_domain = "test-universe.test"
+ options = client_options.ClientOptions(universe_domain=universe_domain)
+ creds = AnonymousCredentials()
+ creds._universe_domain = "different-universe"
+ with pytest.raises(ValueError) as exc:
+ self._make_client(
+ project="project_id",
+ client_options=options,
+ use_emulator=False,
+ credentials=creds,
+ )
+ err_msg = (
+ f"The configured universe domain ({universe_domain}) does "
+ "not match the universe domain found in the credentials "
+ f"({creds.universe_domain}). If you haven't "
+ "configured the universe domain explicitly, `googleapis.com` "
+ "is the default."
+ )
+ assert exc.value.args[0] == err_msg
+
+ @CrossSync.pytest
+ async def test_configured_universe_domain_matches_credentials(self):
+ """Test that configured universe domain succeeds with matching universe
+ domain credentials.
+ """ + universe_domain = "test-universe.test" + options = client_options.ClientOptions(universe_domain=universe_domain) + creds = AnonymousCredentials() + creds._universe_domain = universe_domain + async with self._make_client( + project="project_id", credentials=creds, client_options=options + ) as client: + assert client.universe_domain == universe_domain + assert client.api_endpoint == f"bigtable.{universe_domain}" + + +@CrossSync.convert_class("TestTable", add_mapping_for_name="TestTable") +class TestTableAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @staticmethod + @CrossSync.convert + def _get_target_class(): + return CrossSync.Table + + def _make_one( + self, + client, + instance_id="instance", + table_id="table", + app_profile_id=None, + **kwargs, + ): + return self._get_target_class()( + client, instance_id, table_id, app_profile_id, **kwargs + ) + + @CrossSync.pytest + async def test_ctor(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + from google.cloud.bigtable.data._metrics import ( + BigtableClientSideMetricsController, + ) + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_operation_timeout = 123 + expected_attempt_timeout = 12 + expected_read_rows_operation_timeout = 1.5 + expected_read_rows_attempt_timeout = 0.5 + expected_mutate_rows_operation_timeout = 2.5 + expected_mutate_rows_attempt_timeout = 0.75 + client = self._make_client() + assert not client._active_instances + + table = self._get_target_class()( + client, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + default_operation_timeout=expected_operation_timeout, + default_attempt_timeout=expected_attempt_timeout, + default_read_rows_operation_timeout=expected_read_rows_operation_timeout, + default_read_rows_attempt_timeout=expected_read_rows_attempt_timeout, + default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, + ) + await CrossSync.yield_to_event_loop() + assert table.table_id == expected_table_id + assert table.instance_id == expected_instance_id + assert ( + table.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert ( + table.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey(table.instance_name, table.app_profile_id) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + assert isinstance(table._metrics, BigtableClientSideMetricsController) + assert table.default_operation_timeout == expected_operation_timeout + assert table.default_attempt_timeout == expected_attempt_timeout + assert ( + table.default_read_rows_operation_timeout + == expected_read_rows_operation_timeout + ) + assert ( + table.default_read_rows_attempt_timeout + == expected_read_rows_attempt_timeout + ) + assert ( + table.default_mutate_rows_operation_timeout + == expected_mutate_rows_operation_timeout + ) + assert ( + table.default_mutate_rows_attempt_timeout + == expected_mutate_rows_attempt_timeout + ) + # ensure task reaches completion + await table._register_instance_future + assert table._register_instance_future.done() + assert 
not table._register_instance_future.cancelled()
+ assert table._register_instance_future.exception() is None
+ await client.close()
+
+ @CrossSync.pytest
+ async def test_ctor_defaults(self):
+ """
+ should provide default timeout values and app_profile_id
+ """
+ client = self._make_client()
+ assert not client._active_instances
+
+ table = self._make_one(client)
+ await CrossSync.yield_to_event_loop()
+ assert table.app_profile_id is None
+ assert table.client is client
+ assert table.default_operation_timeout == 60
+ assert table.default_read_rows_operation_timeout == 600
+ assert table.default_mutate_rows_operation_timeout == 600
+ assert table.default_attempt_timeout == 20
+ assert table.default_read_rows_attempt_timeout == 20
+ assert table.default_mutate_rows_attempt_timeout == 60
+ await client.close()
+
+ @CrossSync.pytest
+ async def test_ctor_invalid_timeout_values(self):
+ """
+ bad timeout values should raise ValueError
+ """
+ client = self._make_client()
+
+ timeout_pairs = [
+ ("default_operation_timeout", "default_attempt_timeout"),
+ (
+ "default_read_rows_operation_timeout",
+ "default_read_rows_attempt_timeout",
+ ),
+ (
+ "default_mutate_rows_operation_timeout",
+ "default_mutate_rows_attempt_timeout",
+ ),
+ ]
+ for operation_timeout, attempt_timeout in timeout_pairs:
+ with pytest.raises(ValueError) as e:
+ self._make_one(client, **{attempt_timeout: -1})
+ assert "attempt_timeout must be greater than 0" in str(e.value)
+ with pytest.raises(ValueError) as e:
+ self._make_one(client, **{operation_timeout: -1})
+ assert "operation_timeout must be greater than 0" in str(e.value)
+ await client.close()
+
+ @CrossSync.drop
+ def test_table_ctor_sync(self):
+ # initializing the table in a sync context should raise RuntimeError
+ client = mock.Mock()
+ with pytest.raises(RuntimeError) as e:
+ TableAsync(client, "instance-id", "table-id")
+ assert e.match("TableAsync must be created within an async event loop context.")
+
+ @CrossSync.pytest
+ # iterate over all retryable rpcs
+ @pytest.mark.parametrize(
+ "fn_name,fn_args,is_stream,extra_retryables",
+ [
+ (
+ "read_rows_stream",
+ (ReadRowsQuery(),),
+ True,
+ (),
+ ),
+ (
+ "read_rows",
+ (ReadRowsQuery(),),
+ True,
+ (),
+ ),
+ (
+ "read_row",
+ (b"row_key",),
+ True,
+ (),
+ ),
+ (
+ "read_rows_sharded",
+ ([ReadRowsQuery()],),
+ True,
+ (),
+ ),
+ (
+ "row_exists",
+ (b"row_key",),
+ True,
+ (),
+ ),
+ ("sample_row_keys", (), False, ()),
+ (
+ "mutate_row",
+ (b"row_key", [DeleteAllFromRow()]),
+ False,
+ (),
+ ),
+ (
+ "bulk_mutate_rows",
+ ([mutations.RowMutationEntry(b"key", [DeleteAllFromRow()])],),
+ False,
+ (_MutateRowsIncomplete,),
+ ),
+ ],
+ )
+ # test different inputs for retryable exceptions
+ @pytest.mark.parametrize(
+ "input_retryables,expected_retryables",
+ [
+ (
+ TABLE_DEFAULT.READ_ROWS,
+ [
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ core_exceptions.Aborted,
+ core_exceptions.Cancelled,
+ ],
+ ),
+ (
+ TABLE_DEFAULT.DEFAULT,
+ [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable],
+ ),
+ (
+ TABLE_DEFAULT.MUTATE_ROWS,
+ [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable],
+ ),
+ ([], []),
+ ([4], [core_exceptions.DeadlineExceeded]),
+ ],
+ )
+ async def test_customizable_retryable_errors(
+ self,
+ input_retryables,
+ expected_retryables,
+ fn_name,
+ fn_args,
+ is_stream,
+ extra_retryables,
+ ):
+ """
+ Test that retryable functions support user-configurable arguments, and that the configured retryables are passed
+ down to the gapic
layer. + """ + retry_fn = "retry_target" + if is_stream: + retry_fn += "_stream" + if CrossSync.is_async: + retry_fn = f"CrossSync.{retry_fn}" + else: + retry_fn = f"CrossSync._Sync_Impl.{retry_fn}" + with mock.patch( + f"google.cloud.bigtable.data._cross_sync.{retry_fn}" + ) as retry_fn_mock: + async with self._make_client() as client: + table = client.get_table("instance-id", "table-id") + expected_predicate = expected_retryables.__contains__ + retry_fn_mock.side_effect = RuntimeError("stop early") + with mock.patch( + "google.api_core.retry.if_exception_type" + ) as predicate_builder_mock: + predicate_builder_mock.return_value = expected_predicate + with pytest.raises(Exception): + # we expect an exception from attempting to call the mock + test_fn = table.__getattribute__(fn_name) + await test_fn(*fn_args, retryable_errors=input_retryables) + # passed in errors should be used to build the predicate + predicate_builder_mock.assert_called_once_with( + *expected_retryables, *extra_retryables + ) + retry_call_args = retry_fn_mock.call_args_list[0].args + # output of if_exception_type should be sent in to retry constructor + assert retry_call_args[1] is expected_predicate + + @pytest.mark.parametrize( + "fn_name,fn_args,gapic_fn", + [ + ("read_rows_stream", (ReadRowsQuery(),), "read_rows"), + ("read_rows", (ReadRowsQuery(),), "read_rows"), + ("read_row", (b"row_key",), "read_rows"), + ("read_rows_sharded", ([ReadRowsQuery()],), "read_rows"), + ("row_exists", (b"row_key",), "read_rows"), + ("sample_row_keys", (), "sample_row_keys"), + ("mutate_row", (b"row_key", [mutations.DeleteAllFromRow()]), "mutate_row"), + ( + "bulk_mutate_rows", + ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), + "mutate_rows", + ), + ("check_and_mutate_row", (b"row_key", None), "check_and_mutate_row"), + ( + "read_modify_write_row", + (b"row_key", IncrementRule("f", "q")), + "read_modify_write_row", + ), + ], + ) + @pytest.mark.parametrize("include_app_profile", [True, False]) + @CrossSync.pytest + @CrossSync.convert + async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): + profile = "profile" if include_app_profile else None + client = self._make_client() + # create mock for rpc stub + transport_mock = mock.MagicMock() + rpc_mock = CrossSync.Mock() + transport_mock._wrapped_methods.__getitem__.return_value = rpc_mock + gapic_client = client._gapic_client + if CrossSync.is_async: + # inner BigtableClient is held as ._client for BigtableAsyncClient + gapic_client = gapic_client._client + gapic_client._transport = transport_mock + gapic_client._is_universe_domain_valid = True + table = self._make_one(client, app_profile_id=profile) + try: + test_fn = table.__getattribute__(fn_name) + maybe_stream = await test_fn(*fn_args) + [i async for i in maybe_stream] + except Exception: + # we expect an exception from attempting to call the mock + pass + assert rpc_mock.call_count == 1 + kwargs = rpc_mock.call_args_list[0][1] + metadata = kwargs["metadata"] + # expect single metadata entry + assert len(metadata) == 1 + # expect x-goog-request-params tag + assert metadata[0][0] == "x-goog-request-params" + routing_str = metadata[0][1] + assert f"table_name={table.table_name}" in routing_str + if include_app_profile: + assert "app_profile_id=profile" in routing_str + else: + # empty app_profile_id should send empty string + assert "app_profile_id=" in routing_str + + @CrossSync.pytest + async def test_close(self): + client = self._make_client() + table = self._make_one(client) + 
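+ # closing the table should close its metrics controller and unregister it from the client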
with mock.patch.object( + table._metrics, "close", mock.Mock() + ) as metric_close_mock: + with mock.patch.object( + client, "_remove_instance_registration" + ) as remove_mock: + await table.close() + remove_mock.assert_called_once_with( + table.instance_id, table.app_profile_id, id(table) + ) + metric_close_mock.assert_called_once() + + +@CrossSync.convert_class( + "TestAuthorizedView", add_mapping_for_name="TestAuthorizedView" +) +class TestAuthorizedViewsAsync(CrossSync.TestTable): + """ + Inherit tests from TestTableAsync, with some modifications + """ + + @staticmethod + @CrossSync.convert + def _get_target_class(): + return CrossSync.AuthorizedView + + def _make_one( + self, + client, + instance_id="instance", + table_id="table", + view_id="view", + app_profile_id=None, + **kwargs, + ): + return self._get_target_class()( + client, instance_id, table_id, view_id, app_profile_id, **kwargs + ) + + @CrossSync.pytest + async def test_ctor(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + from google.cloud.bigtable.data._metrics import ( + BigtableClientSideMetricsController, + ) + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_view_id = "view_id" + expected_app_profile_id = "app-profile-id" + expected_operation_timeout = 123 + expected_attempt_timeout = 12 + expected_read_rows_operation_timeout = 1.5 + expected_read_rows_attempt_timeout = 0.5 + expected_mutate_rows_operation_timeout = 2.5 + expected_mutate_rows_attempt_timeout = 0.75 + client = self._make_client() + assert not client._active_instances + + view = self._get_target_class()( + client, + expected_instance_id, + expected_table_id, + expected_view_id, + expected_app_profile_id, + default_operation_timeout=expected_operation_timeout, + default_attempt_timeout=expected_attempt_timeout, + default_read_rows_operation_timeout=expected_read_rows_operation_timeout, + default_read_rows_attempt_timeout=expected_read_rows_attempt_timeout, + default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, + ) + await CrossSync.yield_to_event_loop() + assert view.table_id == expected_table_id + assert ( + view.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert view.instance_id == expected_instance_id + assert ( + view.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert view.authorized_view_id == expected_view_id + assert ( + view.authorized_view_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}/authorizedViews/{expected_view_id}" + ) + assert view.app_profile_id == expected_app_profile_id + assert view.client is client + instance_key = _WarmedInstanceKey(view.instance_name, view.app_profile_id) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(view)} + assert isinstance(view._metrics, BigtableClientSideMetricsController) + assert view.default_operation_timeout == expected_operation_timeout + assert view.default_attempt_timeout == expected_attempt_timeout + assert ( + view.default_read_rows_operation_timeout + == expected_read_rows_operation_timeout + ) + assert ( + view.default_read_rows_attempt_timeout == expected_read_rows_attempt_timeout + ) + assert ( + view.default_mutate_rows_operation_timeout + == expected_mutate_rows_operation_timeout + ) + assert ( + 
view.default_mutate_rows_attempt_timeout + == expected_mutate_rows_attempt_timeout + ) + # ensure task reaches completion + await view._register_instance_future + assert view._register_instance_future.done() + assert not view._register_instance_future.cancelled() + assert view._register_instance_future.exception() is None + await client.close() + + +@CrossSync.convert_class( + "TestReadRows", + add_mapping_for_name="TestReadRows", +) +class TestReadRowsAsync: + """ + Tests for table.read_rows and related methods. + """ + + @staticmethod + @CrossSync.convert + def _get_operation_class(): + return CrossSync._ReadRowsOperation + + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.convert + def _make_table(self, *args, **kwargs): + client_mock = mock.Mock() + client_mock._register_instance.side_effect = ( + lambda *args, **kwargs: CrossSync.yield_to_event_loop() + ) + client_mock._remove_instance_registration.side_effect = ( + lambda *args, **kwargs: CrossSync.yield_to_event_loop() + ) + kwargs["instance_id"] = kwargs.get( + "instance_id", args[0] if args else "instance" + ) + kwargs["table_id"] = kwargs.get( + "table_id", args[1] if len(args) > 1 else "table" + ) + client_mock._gapic_client.table_path.return_value = kwargs["table_id"] + client_mock._gapic_client.instance_path.return_value = kwargs["instance_id"] + return CrossSync.TestTable._get_target_class()(client_mock, *args, **kwargs) + + def _make_stats(self): + from google.cloud.bigtable_v2.types import RequestStats + from google.cloud.bigtable_v2.types import FullReadStatsView + from google.cloud.bigtable_v2.types import ReadIterationStats + + return RequestStats( + full_read_stats_view=FullReadStatsView( + read_iteration_stats=ReadIterationStats( + rows_seen_count=1, + rows_returned_count=2, + cells_seen_count=3, + cells_returned_count=4, + ) + ) + ) + + @staticmethod + def _make_chunk(*args, **kwargs): + from google.cloud.bigtable_v2 import ReadRowsResponse + + kwargs["row_key"] = kwargs.get("row_key", b"row_key") + kwargs["family_name"] = kwargs.get("family_name", "family_name") + kwargs["qualifier"] = kwargs.get("qualifier", b"qualifier") + kwargs["value"] = kwargs.get("value", b"value") + kwargs["commit_row"] = kwargs.get("commit_row", True) + + return ReadRowsResponse.CellChunk(*args, **kwargs) + + @staticmethod + @CrossSync.convert + async def _make_gapic_stream( + chunk_list: list[ReadRowsResponse.CellChunk | Exception], + sleep_time=0, + ): + from google.cloud.bigtable_v2 import ReadRowsResponse + + class mock_stream: + def __init__(self, chunk_list, sleep_time): + self.chunk_list = chunk_list + self.idx = -1 + self.sleep_time = sleep_time + + @CrossSync.convert(sync_name="__iter__") + def __aiter__(self): + return self + + @CrossSync.convert(sync_name="__next__") + async def __anext__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + if sleep_time: + await CrossSync.sleep(self.sleep_time) + chunk = self.chunk_list[self.idx] + if isinstance(chunk, Exception): + raise chunk + else: + return ReadRowsResponse(chunks=[chunk]) + raise CrossSync.StopIteration + + def cancel(self): + pass + + return mock_stream(chunk_list, sleep_time) + + @CrossSync.convert + async def execute_fn(self, table, *args, **kwargs): + return await table.read_rows(*args, **kwargs) + + @CrossSync.pytest + async def test_read_rows(self): + query = ReadRowsQuery() + chunks = [ + self._make_chunk(row_key=b"test_1"), + 
self._make_chunk(row_key=b"test_2"), + ] + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + results = await self.execute_fn(table, query, operation_timeout=3) + assert len(results) == 2 + assert results[0].row_key == b"test_1" + assert results[1].row_key == b"test_2" + + @CrossSync.pytest + async def test_read_rows_stream(self): + query = ReadRowsQuery() + chunks = [ + self._make_chunk(row_key=b"test_1"), + self._make_chunk(row_key=b"test_2"), + ] + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + gen = await table.read_rows_stream(query, operation_timeout=3) + results = [row async for row in gen] + assert len(results) == 2 + assert results[0].row_key == b"test_1" + assert results[1].row_key == b"test_2" + + @pytest.mark.parametrize("include_app_profile", [True, False]) + @CrossSync.pytest + async def test_read_rows_query_matches_request(self, include_app_profile): + from google.cloud.bigtable.data import RowRange + from google.cloud.bigtable.data.row_filters import PassAllFilter + + app_profile_id = "app_profile_id" if include_app_profile else None + async with self._make_table(app_profile_id=app_profile_id) as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream([]) + row_keys = [b"test_1", "test_2"] + row_ranges = RowRange("1start", "2end") + filter_ = PassAllFilter(True) + limit = 99 + query = ReadRowsQuery( + row_keys=row_keys, + row_ranges=row_ranges, + row_filter=filter_, + limit=limit, + ) + + results = await table.read_rows(query, operation_timeout=3) + assert len(results) == 0 + call_request = read_rows.call_args_list[0][0][0] + query_pb = query._to_pb(table) + assert call_request == query_pb + + @pytest.mark.parametrize("operation_timeout", [0.001, 0.023, 0.1]) + @CrossSync.pytest + async def test_read_rows_timeout(self, operation_timeout): + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + query = ReadRowsQuery() + chunks = [self._make_chunk(row_key=b"test_1")] + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks, sleep_time=0.15 + ) + try: + await table.read_rows(query, operation_timeout=operation_timeout) + except core_exceptions.DeadlineExceeded as e: + assert ( + e.message + == f"operation_timeout of {operation_timeout:0.1f}s exceeded" + ) + + @pytest.mark.parametrize( + "per_request_t, operation_t, expected_num", + [ + (0.1, 0.19, 2), + (0.1, 0.29, 3), + ], + ) + @CrossSync.pytest + async def test_read_rows_attempt_timeout( + self, per_request_t, operation_t, expected_num + ): + """ + Ensures that the attempt_timeout is respected and that the number of + requests is as expected. + + operation_timeout does not cancel the request, so we expect the number of + requests to be the ceiling of operation_timeout / attempt_timeout. 
+ """ + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + expected_last_timeout = operation_t - (expected_num - 1) * per_request_t + + # mocking uniform ensures there are no sleeps between retries + with mock.patch("random.uniform", side_effect=lambda a, b: 0): + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks, sleep_time=per_request_t + ) + query = ReadRowsQuery() + chunks = [core_exceptions.DeadlineExceeded("mock deadline")] + + try: + await table.read_rows( + query, + operation_timeout=operation_t, + attempt_timeout=per_request_t, + ) + except core_exceptions.DeadlineExceeded as e: + retry_exc = e.__cause__ + if expected_num == 0: + assert retry_exc is None + else: + assert type(retry_exc) is RetryExceptionGroup + assert f"{expected_num} failed attempts" in str(retry_exc) + assert len(retry_exc.exceptions) == expected_num + for sub_exc in retry_exc.exceptions: + assert sub_exc.message == "mock deadline" + assert read_rows.call_count == expected_num + # check timeouts + for _, call_kwargs in read_rows.call_args_list[:-1]: + assert call_kwargs["timeout"] == per_request_t + assert call_kwargs["retry"] is None + # last timeout should be adjusted to account for the time spent + assert ( + abs( + read_rows.call_args_list[-1][1]["timeout"] + - expected_last_timeout + ) + < 0.05 + ) + + @pytest.mark.parametrize( + "exc_type", + [ + core_exceptions.Aborted, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @CrossSync.pytest + async def test_read_rows_retryable_error(self, exc_type): + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + [expected_error] + ) + query = ReadRowsQuery() + expected_error = exc_type("mock error") + try: + await table.read_rows(query, operation_timeout=0.1) + except core_exceptions.DeadlineExceeded as e: + retry_exc = e.__cause__ + root_cause = retry_exc.exceptions[0] + assert type(root_cause) is exc_type + assert root_cause == expected_error + + @pytest.mark.parametrize( + "exc_type", + [ + core_exceptions.PreconditionFailed, + core_exceptions.NotFound, + core_exceptions.PermissionDenied, + core_exceptions.Conflict, + core_exceptions.InternalServerError, + core_exceptions.TooManyRequests, + core_exceptions.ResourceExhausted, + InvalidChunk, + ], + ) + @CrossSync.pytest + async def test_read_rows_non_retryable_error(self, exc_type): + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + [expected_error] + ) + query = ReadRowsQuery() + expected_error = exc_type("mock error") + try: + await table.read_rows(query, operation_timeout=0.1) + except exc_type as e: + assert e == expected_error + + @CrossSync.pytest + async def test_read_rows_revise_request(self): + """ + Ensure that _revise_request is called between retries + """ + from google.cloud.bigtable.data.exceptions import InvalidChunk + from google.cloud.bigtable_v2.types import RowSet + + return_val = RowSet() + with mock.patch.object( + self._get_operation_class(), "_revise_request_rowset" + ) as revise_rowset: + revise_rowset.return_value = return_val + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: 
self._make_gapic_stream( + chunks + ) + row_keys = [b"test_1", b"test_2", b"test_3"] + query = ReadRowsQuery(row_keys=row_keys) + chunks = [ + self._make_chunk(row_key=b"test_1"), + core_exceptions.Aborted("mock retryable error"), + ] + try: + await table.read_rows(query) + except InvalidChunk: + revise_rowset.assert_called() + first_call_kwargs = revise_rowset.call_args_list[0].kwargs + assert first_call_kwargs["row_set"] == query._to_pb(table).rows + assert first_call_kwargs["last_seen_row_key"] == b"test_1" + revised_call = read_rows.call_args_list[1].args[0] + assert revised_call.rows == return_val + + @CrossSync.pytest + async def test_read_rows_default_timeouts(self): + """ + Ensure that the default timeouts are set on the read rows operation when not overridden + """ + operation_timeout = 8 + attempt_timeout = 4 + with mock.patch.object(self._get_operation_class(), "__init__") as mock_op: + mock_op.side_effect = RuntimeError("mock error") + async with self._make_table( + default_read_rows_operation_timeout=operation_timeout, + default_read_rows_attempt_timeout=attempt_timeout, + ) as table: + try: + await table.read_rows(ReadRowsQuery()) + except RuntimeError: + pass + kwargs = mock_op.call_args_list[0].kwargs + assert kwargs["operation_timeout"] == operation_timeout + assert kwargs["attempt_timeout"] == attempt_timeout + + @CrossSync.pytest + async def test_read_rows_default_timeout_override(self): + """ + When timeouts are passed, they overwrite default values + """ + operation_timeout = 8 + attempt_timeout = 4 + with mock.patch.object(self._get_operation_class(), "__init__") as mock_op: + mock_op.side_effect = RuntimeError("mock error") + async with self._make_table( + default_operation_timeout=99, default_attempt_timeout=97 + ) as table: + try: + await table.read_rows( + ReadRowsQuery(), + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + ) + except RuntimeError: + pass + kwargs = mock_op.call_args_list[0].kwargs + assert kwargs["operation_timeout"] == operation_timeout + assert kwargs["attempt_timeout"] == attempt_timeout + + @CrossSync.pytest + async def test_read_row(self): + """Test reading a single row""" + async with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + expected_result = object() + read_rows.side_effect = lambda *args, **kwargs: [expected_result] + expected_op_timeout = 8 + expected_req_timeout = 4 + row = await table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert row == expected_result + assert read_rows.call_count == 1 + args, kwargs = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert len(args) == 1 + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + + @CrossSync.pytest + async def test_read_row_w_filter(self): + """Test reading a single row with an added filter""" + async with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + expected_result = object() + read_rows.side_effect = lambda *args, **kwargs: [expected_result] + expected_op_timeout = 8 + expected_req_timeout = 4 + mock_filter = mock.Mock() + expected_filter = {"filter": "mock 
filter"} + mock_filter._to_dict.return_value = expected_filter + row = await table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + row_filter=expected_filter, + ) + assert row == expected_result + assert read_rows.call_count == 1 + args, kwargs = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert len(args) == 1 + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + assert query.filter == expected_filter + + @CrossSync.pytest + async def test_read_row_no_response(self): + """should return None if row does not exist""" + async with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + # return no rows + read_rows.side_effect = lambda *args, **kwargs: [] + expected_op_timeout = 8 + expected_req_timeout = 4 + result = await table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert result is None + assert read_rows.call_count == 1 + args, kwargs = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + + @pytest.mark.parametrize( + "return_value,expected_result", + [ + ([], False), + ([object()], True), + ([object(), object()], True), + ], + ) + @CrossSync.pytest + async def test_row_exists(self, return_value, expected_result): + """Test checking for row existence""" + async with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + # return no rows + read_rows.side_effect = lambda *args, **kwargs: return_value + expected_op_timeout = 1 + expected_req_timeout = 2 + result = await table.row_exists( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert expected_result == result + assert read_rows.call_count == 1 + args, kwargs = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert isinstance(args[0], ReadRowsQuery) + expected_filter = { + "chain": { + "filters": [ + {"cells_per_row_limit_filter": 1}, + {"strip_value_transformer": True}, + ] + } + } + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + assert query.filter._to_dict() == expected_filter + + +@CrossSync.convert_class("TestReadRowsSharded") +class TestReadRowsShardedAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.pytest + async def test_read_rows_sharded_empty_query(self): + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as exc: + await table.read_rows_sharded([]) + assert "empty sharded_query" in str(exc.value) + + @CrossSync.pytest + async def test_read_rows_sharded_multiple_queries(self): + """ + Test with multiple queries. 
Should return results from both + """ + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "read_rows" + ) as read_rows: + read_rows.side_effect = lambda *args, **kwargs: CrossSync.TestReadRows._make_gapic_stream( + [ + CrossSync.TestReadRows._make_chunk(row_key=k) + for k in args[0].rows.row_keys + ] + ) + query_1 = ReadRowsQuery(b"test_1") + query_2 = ReadRowsQuery(b"test_2") + result = await table.read_rows_sharded([query_1, query_2]) + assert len(result) == 2 + assert result[0].row_key == b"test_1" + assert result[1].row_key == b"test_2" + + @pytest.mark.parametrize("n_queries", [1, 2, 5, 11, 24]) + @CrossSync.pytest + async def test_read_rows_sharded_multiple_queries_calls(self, n_queries): + """ + Each query should trigger a separate read_rows call + """ + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + query_list = [ReadRowsQuery() for _ in range(n_queries)] + await table.read_rows_sharded(query_list) + assert read_rows.call_count == n_queries + + @CrossSync.pytest + async def test_read_rows_sharded_errors(self): + """ + Errors should be exposed as ShardedReadRowsExceptionGroups + """ + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedQueryShardError + + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = RuntimeError("mock error") + query_1 = ReadRowsQuery(b"test_1") + query_2 = ReadRowsQuery(b"test_2") + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + await table.read_rows_sharded([query_1, query_2]) + exc_group = exc.value + assert isinstance(exc_group, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == 2 + assert isinstance(exc.value.exceptions[0], FailedQueryShardError) + assert isinstance(exc.value.exceptions[0].__cause__, RuntimeError) + assert exc.value.exceptions[0].index == 0 + assert exc.value.exceptions[0].query == query_1 + assert isinstance(exc.value.exceptions[1], FailedQueryShardError) + assert isinstance(exc.value.exceptions[1].__cause__, RuntimeError) + assert exc.value.exceptions[1].index == 1 + assert exc.value.exceptions[1].query == query_2 + + @CrossSync.pytest + async def test_read_rows_sharded_concurrent(self): + """ + Ensure sharded requests are concurrent + """ + import time + + async def mock_call(*args, **kwargs): + await CrossSync.sleep(0.1) + return [mock.Mock()] + + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(10)] + start_time = time.monotonic() + result = await table.read_rows_sharded(queries) + call_time = time.monotonic() - start_time + assert read_rows.call_count == 10 + assert len(result) == 10 + # if run in sequence, we would expect this to take 1 second + assert call_time < 0.5 + + @CrossSync.pytest + async def test_read_rows_sharded_concurrency_limit(self): + """ + Only 10 queries should be processed concurrently. 
Others should be queued + + Should start a new query as soon as a previous one finishes + """ + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + + assert _CONCURRENCY_LIMIT == 10 # change this test if this changes + num_queries = 15 + + # each of the first 10 queries takes longer than the last + # later rpcs will have to wait on the first 10 + increment_time = 0.05 + max_time = increment_time * (_CONCURRENCY_LIMIT - 1) + rpc_times = [min(i * increment_time, max_time) for i in range(num_queries)] + + async def mock_call(*args, **kwargs): + next_sleep = rpc_times.pop(0) + await asyncio.sleep(next_sleep) + return [mock.Mock()] + + starting_timeout = 10 + + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(num_queries)] + await table.read_rows_sharded( + queries, operation_timeout=starting_timeout + ) + assert read_rows.call_count == num_queries + # check operation timeouts to see how far into the operation each rpc started + rpc_start_list = [ + starting_timeout - kwargs["operation_timeout"] + for _, kwargs in read_rows.call_args_list + ] + eps = 0.01 + # first 10 should start immediately + assert all( + rpc_start_list[i] < eps for i in range(_CONCURRENCY_LIMIT) + ) + # next rpcs should start as first ones finish + for i in range(num_queries - _CONCURRENCY_LIMIT): + idx = i + _CONCURRENCY_LIMIT + assert rpc_start_list[idx] - (i * increment_time) < eps + + @CrossSync.pytest + async def test_read_rows_sharded_expiry(self): + """ + If the operation times out before all shards complete, should raise + a ShardedReadRowsExceptionGroup + """ + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.api_core.exceptions import DeadlineExceeded + + operation_timeout = 0.1 + + # let the first batch complete, but the next batch times out + num_queries = 15 + sleeps = [0] * _CONCURRENCY_LIMIT + [DeadlineExceeded("times up")] * ( + num_queries - _CONCURRENCY_LIMIT + ) + + async def mock_call(*args, **kwargs): + next_item = sleeps.pop(0) + if isinstance(next_item, Exception): + raise next_item + else: + await asyncio.sleep(next_item) + return [mock.Mock()] + + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(num_queries)] + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + await table.read_rows_sharded( + queries, operation_timeout=operation_timeout + ) + assert isinstance(exc.value, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == num_queries - _CONCURRENCY_LIMIT + # should keep successful queries + assert len(exc.value.successful_rows) == _CONCURRENCY_LIMIT + + @CrossSync.pytest + async def test_read_rows_sharded_negative_batch_timeout(self): + """ + Try to run with a batch that starts after the operation timeout + + Those queries should raise DeadlineExceeded errors + """ + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + from google.api_core.exceptions import DeadlineExceeded + + async def mock_call(*args, **kwargs): + await CrossSync.sleep(0.06) + return [mock.Mock()] + + async with 
self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + num_calls = 15 + queries = [ReadRowsQuery() for _ in range(num_calls)] + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + await table.read_rows_sharded(queries, operation_timeout=0.05) + assert isinstance(exc.value, ShardedReadRowsExceptionGroup) + # _CONCURRENCY_LIMIT calls will run, and won't be interrupted + # calls after the limit will be cancelled due to timeout + assert len(exc.value.exceptions) >= num_calls - _CONCURRENCY_LIMIT + assert all( + isinstance(e.__cause__, DeadlineExceeded) + for e in exc.value.exceptions + ) + + +@CrossSync.convert_class("TestSampleRowKeys") +class TestSampleRowKeysAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.convert + async def _make_gapic_stream(self, sample_list: list[tuple[bytes, int]]): + from google.cloud.bigtable_v2.types import SampleRowKeysResponse + + for value in sample_list: + yield SampleRowKeysResponse(row_key=value[0], offset_bytes=value[1]) + + @CrossSync.pytest + async def test_sample_row_keys(self): + """ + Test that method returns the expected key samples + """ + samples = [ + (b"test_1", 0), + (b"test_2", 100), + (b"test_3", 200), + ] + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream(samples) + result = await table.sample_row_keys() + assert len(result) == 3 + assert all(isinstance(r, tuple) for r in result) + assert all(isinstance(r[0], bytes) for r in result) + assert all(isinstance(r[1], int) for r in result) + assert result[0] == samples[0] + assert result[1] == samples[1] + assert result[2] == samples[2] + + @CrossSync.pytest + async def test_sample_row_keys_bad_timeout(self): + """ + should raise error if timeout is negative + """ + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + await table.sample_row_keys(operation_timeout=-1) + assert "operation_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + await table.sample_row_keys(attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + + @CrossSync.pytest + async def test_sample_row_keys_default_timeout(self): + """Should fallback to using table default operation_timeout""" + expected_timeout = 99 + async with self._make_client() as client: + async with client.get_table( + "i", + "t", + default_operation_timeout=expected_timeout, + default_attempt_timeout=expected_timeout, + ) as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream([]) + result = await table.sample_row_keys() + _, kwargs = sample_row_keys.call_args + assert abs(kwargs["timeout"] - expected_timeout) < 0.1 + assert result == [] + assert kwargs["retry"] is None + + @CrossSync.pytest + async def test_sample_row_keys_gapic_params(self): + """ + make sure arguments are propagated to gapic call as expected + """ + expected_timeout = 10 + expected_profile = "test1" + instance = "instance_name" + table_id = 
"my_table" + async with self._make_client() as client: + async with client.get_table( + instance, table_id, app_profile_id=expected_profile + ) as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream([]) + await table.sample_row_keys(attempt_timeout=expected_timeout) + args, kwargs = sample_row_keys.call_args + assert len(args) == 0 + assert len(kwargs) == 3 + assert kwargs["timeout"] == expected_timeout + assert kwargs["retry"] is None + request = kwargs["request"] + assert request.app_profile_id == expected_profile + assert request.table_name == table.table_name + + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @CrossSync.pytest + async def test_sample_row_keys_retryable_errors(self, retryable_exception): + """ + retryable errors should be retried until timeout + """ + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() + ) as sample_row_keys: + sample_row_keys.side_effect = retryable_exception("mock") + with pytest.raises(DeadlineExceeded) as e: + await table.sample_row_keys(operation_timeout=0.05) + cause = e.value.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert len(cause.exceptions) > 0 + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + core_exceptions.Aborted, + ], + ) + @CrossSync.pytest + async def test_sample_row_keys_non_retryable_errors(self, non_retryable_exception): + """ + non-retryable errors should cause a raise + """ + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() + ) as sample_row_keys: + sample_row_keys.side_effect = non_retryable_exception("mock") + with pytest.raises(non_retryable_exception): + await table.sample_row_keys() + + +@CrossSync.convert_class("TestMutateRow") +class TestMutateRowAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.pytest + @pytest.mark.parametrize( + "mutation_arg", + [ + mutations.SetCell("family", b"qualifier", b"value"), + mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=1234567890 + ), + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromFamily("family"), + mutations.DeleteAllFromRow(), + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromRow(), + ], + ], + ) + async def test_mutate_row(self, mutation_arg): + """Test mutations with no errors""" + expected_attempt_timeout = 19 + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.return_value = None + await table.mutate_row( + 
"row_key", + mutation_arg, + attempt_timeout=expected_attempt_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0].kwargs + request = kwargs["request"] + assert ( + request.table_name + == "projects/project/instances/instance/tables/table" + ) + assert request.row_key == b"row_key" + formatted_mutations = ( + [mutation._to_pb() for mutation in mutation_arg] + if isinstance(mutation_arg, list) + else [mutation_arg._to_pb()] + ) + assert request.mutations == formatted_mutations + assert kwargs["timeout"] == expected_attempt_timeout + # make sure gapic layer is not retrying + assert kwargs["retry"] is None + + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @CrossSync.pytest + async def test_mutate_row_retryable_errors(self, retryable_exception): + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(DeadlineExceeded) as e: + mutation = mutations.DeleteAllFromRow() + assert mutation.is_idempotent() is True + await table.mutate_row( + "row_key", mutation, operation_timeout=0.01 + ) + cause = e.value.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @CrossSync.pytest + async def test_mutate_row_non_idempotent_retryable_errors( + self, retryable_exception + ): + """ + Non-idempotent mutations should not be retried + """ + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(retryable_exception): + mutation = mutations.SetCell( + "family", b"qualifier", b"value", -1 + ) + assert mutation.is_idempotent() is False + await table.mutate_row( + "row_key", mutation, operation_timeout=0.2 + ) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + core_exceptions.Aborted, + ], + ) + @CrossSync.pytest + async def test_mutate_row_non_retryable_errors(self, non_retryable_exception): + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = non_retryable_exception("mock") + with pytest.raises(non_retryable_exception): + mutation = mutations.SetCell( + "family", + b"qualifier", + b"value", + timestamp_micros=1234567890, + ) + assert mutation.is_idempotent() is True + await table.mutate_row( + "row_key", mutation, operation_timeout=0.2 + ) + + @pytest.mark.parametrize("mutations", [[], None]) + @CrossSync.pytest + async def test_mutate_row_no_mutations(self, mutations): + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with 
pytest.raises(ValueError) as e: + await table.mutate_row("key", mutations=mutations) + assert e.value.args[0] == "No mutations provided" + + +@CrossSync.convert_class("TestBulkMutateRows") +class TestBulkMutateRowsAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.convert + async def _mock_response(self, response_list): + from google.cloud.bigtable_v2.types import MutateRowsResponse + from google.rpc import status_pb2 + + statuses = [] + for response in response_list: + if isinstance(response, core_exceptions.GoogleAPICallError): + statuses.append( + status_pb2.Status( + message=str(response), code=response.grpc_status_code.value[0] + ) + ) + else: + statuses.append(status_pb2.Status(code=0)) + entries = [ + MutateRowsResponse.Entry(index=i, status=statuses[i]) + for i in range(len(response_list)) + ] + + @CrossSync.convert + async def generator(): + yield MutateRowsResponse(entries=entries) + + return generator() + + @CrossSync.pytest + @pytest.mark.parametrize( + "mutation_arg", + [ + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=1234567890 + ) + ], + [mutations.DeleteRangeFromColumn("family", b"qualifier")], + [mutations.DeleteAllFromFamily("family")], + [mutations.DeleteAllFromRow()], + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromRow(), + ], + ], + ) + async def test_bulk_mutate_rows(self, mutation_arg): + """Test mutations with no errors""" + expected_attempt_timeout = 19 + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.return_value = self._mock_response([None]) + bulk_mutation = mutations.RowMutationEntry(b"row_key", mutation_arg) + await table.bulk_mutate_rows( + [bulk_mutation], + attempt_timeout=expected_attempt_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert ( + request.table_name + == "projects/project/instances/instance/tables/table" + ) + assert request.entries == [bulk_mutation._to_pb()] + assert kwargs["timeout"] == expected_attempt_timeout + assert kwargs["retry"] is None + + @CrossSync.pytest + async def test_bulk_mutate_rows_multiple_entries(self): + """Test a batch with multiple entries and no errors""" + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.return_value = self._mock_response([None, None]) + mutation_list = [mutations.DeleteAllFromRow()] + entry_1 = mutations.RowMutationEntry(b"row_key_1", mutation_list) + entry_2 = mutations.RowMutationEntry(b"row_key_2", mutation_list) + await table.bulk_mutate_rows( + [entry_1, entry_2], + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert ( + request.table_name + == "projects/project/instances/instance/tables/table" + ) + assert request.entries[0] == entry_1._to_pb() + assert request.entries[1] == entry_2._to_pb() + + @CrossSync.pytest + @pytest.mark.parametrize( + "exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + async 
def test_bulk_mutate_rows_idempotent_mutation_error_retryable( + self, exception + ): + """ + Individual idempotent mutations should be retried if they fail with a retryable error + """ + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.DeleteAllFromRow() + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert "non-idempotent" not in str(failed_exception) + assert isinstance(failed_exception, FailedMutationEntryError) + cause = failed_exception.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], exception) + # last exception should be due to retry timeout + assert isinstance( + cause.exceptions[-1], core_exceptions.DeadlineExceeded + ) + + @CrossSync.pytest + @pytest.mark.parametrize( + "exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + core_exceptions.Aborted, + ], + ) + async def test_bulk_mutate_rows_idempotent_mutation_error_non_retryable( + self, exception + ): + """ + Individual idempotent mutations should not be retried if they fail with a non-retryable error + """ + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.DeleteAllFromRow() + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert "non-idempotent" not in str(failed_exception) + assert isinstance(failed_exception, FailedMutationEntryError) + cause = failed_exception.__cause__ + assert isinstance(cause, exception) + + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @CrossSync.pytest + async def test_bulk_mutate_idempotent_retryable_request_errors( + self, retryable_exception + ): + """ + Individual idempotent mutations should be retried if the request fails with a retryable error + """ + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + 
"family", b"qualifier", b"value", timestamp_micros=123 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" not in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], retryable_exception) + + @CrossSync.pytest + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + async def test_bulk_mutate_rows_non_idempotent_retryable_errors( + self, retryable_exception + ): + """Non-Idempotent mutations should never be retried""" + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [retryable_exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", -1 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is False + await table.bulk_mutate_rows([entry], operation_timeout=0.2) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, retryable_exception) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + ], + ) + @CrossSync.pytest + async def test_bulk_mutate_rows_non_retryable_errors(self, non_retryable_exception): + """ + If the request fails with a non-retryable error, mutations should not be retried + """ + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = non_retryable_exception("mock") + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows([entry], operation_timeout=0.2) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" not in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, non_retryable_exception) + + @CrossSync.pytest + async def test_bulk_mutate_error_index(self): + """ + Test partial failure, partial success. 
Errors should be associated with the correct index + """ + from google.api_core.exceptions import ( + DeadlineExceeded, + ServiceUnavailable, + FailedPrecondition, + ) + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with self._make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + # entry 1 fails with retryable errors, then a non-retryable one + mock_gapic.side_effect = [ + self._mock_response([None, ServiceUnavailable("mock"), None]), + self._mock_response([DeadlineExceeded("mock")]), + self._mock_response([FailedPrecondition("final")]), + ] + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entries = [ + mutations.RowMutationEntry( + (f"row_key_{i}").encode(), [mutation] + ) + for i in range(3) + ] + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows(entries, operation_timeout=1000) + assert len(e.value.exceptions) == 1 + failed = e.value.exceptions[0] + assert isinstance(failed, FailedMutationEntryError) + assert failed.index == 1 + assert failed.entry == entries[1] + cause = failed.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert len(cause.exceptions) == 3 + assert isinstance(cause.exceptions[0], ServiceUnavailable) + assert isinstance(cause.exceptions[1], DeadlineExceeded) + assert isinstance(cause.exceptions[2], FailedPrecondition) + + @CrossSync.pytest + async def test_bulk_mutate_error_recovery(self): + """ + If a retryable error occurs, then resolves, no exception should be raised + """ + from google.api_core.exceptions import DeadlineExceeded + + async with self._make_client(project="project") as client: + table = client.get_table("instance", "table") + with mock.patch.object(client._gapic_client, "mutate_rows") as mock_gapic: + # fail with a retryable error, then succeed on the retry + mock_gapic.side_effect = [ + self._mock_response([DeadlineExceeded("mock")]), + self._mock_response([None]), + ] + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entries = [ + mutations.RowMutationEntry((f"row_key_{i}").encode(), [mutation]) + for i in range(3) + ] + await table.bulk_mutate_rows(entries, operation_timeout=1000) + + +@CrossSync.convert_class("TestCheckAndMutateRow") +class TestCheckAndMutateRowAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.mark.parametrize("gapic_result", [True, False]) + @CrossSync.pytest + async def test_check_and_mutate(self, gapic_result): + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + app_profile = "app_profile_id" + async with self._make_client() as client: + async with client.get_table( + "instance", "table", app_profile_id=app_profile + ) as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=gapic_result + ) + row_key = b"row_key" + predicate = None + true_mutations = [DeleteAllFromRow()] + false_mutations = [DeleteAllFromRow(), DeleteAllFromRow()] + operation_timeout = 0.2 + found = await table.check_and_mutate_row( + row_key, + predicate, + true_case_mutations=true_mutations, + false_case_mutations=false_mutations, + 
operation_timeout=operation_timeout, + ) + assert found == gapic_result + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert request.table_name == table.table_name + assert request.row_key == row_key + assert bool(request.predicate_filter) is False + assert request.true_mutations == [ + m._to_pb() for m in true_mutations + ] + assert request.false_mutations == [ + m._to_pb() for m in false_mutations + ] + assert request.app_profile_id == app_profile + assert kwargs["timeout"] == operation_timeout + assert kwargs["retry"] is None + + @CrossSync.pytest + async def test_check_and_mutate_bad_timeout(self): + """Should raise error if operation_timeout < 0""" + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + await table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=[mock.Mock()], + false_case_mutations=[], + operation_timeout=-1, + ) + assert str(e.value) == "operation_timeout must be greater than 0" + + @CrossSync.pytest + async def test_check_and_mutate_single_mutations(self): + """if single mutations are passed, they should be internally wrapped in a list""" + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + true_mutation = SetCell("family", b"qualifier", b"value") + false_mutation = SetCell("family", b"qualifier", b"value") + await table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=true_mutation, + false_case_mutations=false_mutation, + ) + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert request.true_mutations == [true_mutation._to_pb()] + assert request.false_mutations == [false_mutation._to_pb()] + + @CrossSync.pytest + async def test_check_and_mutate_predicate_object(self): + """predicate filter should be passed to gapic request""" + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + from google.cloud.bigtable_v2.types.data import RowFilter + + mock_predicate = mock.Mock() + predicate_pb = RowFilter({"sink": True}) + mock_predicate._to_pb.return_value = predicate_pb + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + await table.check_and_mutate_row( + b"row_key", + mock_predicate, + false_case_mutations=[DeleteAllFromRow()], + ) + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert request.predicate_filter == predicate_pb + assert mock_predicate._to_pb.call_count == 1 + assert kwargs["retry"] is None + + @CrossSync.pytest + async def test_check_and_mutate_mutations_parsing(self): + """mutations objects should be converted to protos""" + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + from google.cloud.bigtable.data.mutations import DeleteAllFromFamily + + mutations = [mock.Mock() for _ in range(5)] + for idx, mutation in enumerate(mutations): + mutation._to_pb.return_value = DeleteAllFromFamily(f"fake {idx}")._to_pb() + mutations.append(DeleteAllFromRow()) + async 
with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + await table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=mutations[0:2], + false_case_mutations=mutations[2:], + ) + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert request.true_mutations == [ + DeleteAllFromFamily("fake 0")._to_pb(), + DeleteAllFromFamily("fake 1")._to_pb(), + ] + assert request.false_mutations == [ + DeleteAllFromFamily("fake 2")._to_pb(), + DeleteAllFromFamily("fake 3")._to_pb(), + DeleteAllFromFamily("fake 4")._to_pb(), + DeleteAllFromRow()._to_pb(), + ] + assert all( + mutation._to_pb.call_count == 1 for mutation in mutations[:5] + ) + + +@CrossSync.convert_class("TestReadModifyWriteRow") +class TestReadModifyWriteRowAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.mark.parametrize( + "call_rules,expected_rules", + [ + ( + AppendValueRule("f", "c", b"1"), + [AppendValueRule("f", "c", b"1")._to_pb()], + ), + ( + [AppendValueRule("f", "c", b"1")], + [AppendValueRule("f", "c", b"1")._to_pb()], + ), + (IncrementRule("f", "c", 1), [IncrementRule("f", "c", 1)._to_pb()]), + ( + [AppendValueRule("f", "c", b"1"), IncrementRule("f", "c", 1)], + [ + AppendValueRule("f", "c", b"1")._to_pb(), + IncrementRule("f", "c", 1)._to_pb(), + ], + ), + ], + ) + @CrossSync.pytest + async def test_read_modify_write_call_rule_args(self, call_rules, expected_rules): + """ + Test that the gapic call is called with given rules + """ + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + await table.read_modify_write_row("key", call_rules) + assert mock_gapic.call_count == 1 + found_kwargs = mock_gapic.call_args_list[0][1] + request = found_kwargs["request"] + assert request.rules == expected_rules + assert found_kwargs["retry"] is None + + @pytest.mark.parametrize("rules", [[], None]) + @CrossSync.pytest + async def test_read_modify_write_no_rules(self, rules): + async with self._make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + await table.read_modify_write_row("key", rules=rules) + assert e.value.args[0] == "rules must contain at least one item" + + @CrossSync.pytest + async def test_read_modify_write_call_defaults(self): + instance = "instance1" + table_id = "table1" + project = "project1" + row_key = "row_key1" + async with self._make_client(project=project) as client: + async with client.get_table(instance, table_id) as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + await table.read_modify_write_row(row_key, IncrementRule("f", "q")) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + request = kwargs["request"] + assert ( + request.table_name + == f"projects/{project}/instances/{instance}/tables/{table_id}" + ) + assert bool(request.app_profile_id) is False + assert request.row_key == row_key.encode() + assert kwargs["timeout"] > 1 + + @CrossSync.pytest + async def test_read_modify_write_call_overrides(self): + row_key = b"row_key1" + expected_timeout = 12345 + profile_id = 
"profile1" + async with self._make_client() as client: + async with client.get_table( + "instance", "table_id", app_profile_id=profile_id + ) as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + await table.read_modify_write_row( + row_key, + IncrementRule("f", "q"), + operation_timeout=expected_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + request = kwargs["request"] + assert request.app_profile_id == profile_id + assert request.row_key == row_key + assert kwargs["timeout"] == expected_timeout + + @CrossSync.pytest + async def test_read_modify_write_string_key(self): + row_key = "string_row_key1" + async with self._make_client() as client: + async with client.get_table("instance", "table_id") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + await table.read_modify_write_row(row_key, IncrementRule("f", "q")) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + request = kwargs["request"] + assert request.row_key == row_key.encode() + + @CrossSync.pytest + async def test_read_modify_write_row_building(self): + """ + results from gapic call should be used to construct row + """ + from google.cloud.bigtable.data.row import Row + from google.cloud.bigtable_v2.types import ReadModifyWriteRowResponse + from google.cloud.bigtable_v2.types import Row as RowPB + + mock_response = ReadModifyWriteRowResponse(row=RowPB()) + async with self._make_client() as client: + async with client.get_table("instance", "table_id") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + with mock.patch.object(Row, "_from_pb") as constructor_mock: + mock_gapic.return_value = mock_response + await table.read_modify_write_row( + "key", IncrementRule("f", "q") + ) + assert constructor_mock.call_count == 1 + constructor_mock.assert_called_once_with(mock_response.row) + + +@CrossSync.convert_class("TestExecuteQuery") +class TestExecuteQueryAsync: + TABLE_NAME = "TABLE_NAME" + INSTANCE_NAME = "INSTANCE_NAME" + + @pytest.fixture(scope="function") + @CrossSync.convert + def client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.fixture(scope="function") + @CrossSync.convert + def execute_query_mock(self, client): + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + yield execute_query_mock + + @pytest.fixture(scope="function") + @CrossSync.convert + def prepare_mock(self, client): + with mock.patch.object( + client._gapic_client, "prepare_query", CrossSync.Mock() + ) as prepare_mock: + prepare_mock.return_value = prepare_response( + prepared_query=b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ) + yield prepare_mock + + @CrossSync.convert + def _make_gapic_stream(self, sample_list: list["ExecuteQueryResponse" | Exception]): + class MockStream: + def __init__(self, sample_list): + self.sample_list = sample_list + + def __aiter__(self): + return self + + def __iter__(self): + return self + + def __next__(self): + if not self.sample_list: + raise CrossSync.StopIteration + value = self.sample_list.pop(0) + if isinstance(value, Exception): + raise value + return value + + async def __anext__(self): + return self.__next__() + + return MockStream(sample_list) + + @CrossSync.pytest + async def test_execute_query(self, client, execute_query_mock, 
prepare_mock): + values = [ + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + + @CrossSync.pytest + async def test_execute_query_with_params( + self, client, execute_query_mock, prepare_mock + ): + values = [ + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME} WHERE b=@b", + self.INSTANCE_NAME, + parameters={"b": 9}, + ) + results = [r async for r in result] + assert len(results) == 1 + assert results[0]["a"] == "test2" + assert results[0]["b"] == 9 + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + + @CrossSync.pytest + async def test_execute_query_error_before_metadata( + self, client, execute_query_mock, prepare_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + DeadlineExceeded(""), + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + assert prepare_mock.call_count == 1 + + @CrossSync.pytest + async def test_execute_query_error_after_metadata( + self, client, execute_query_mock, prepare_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + DeadlineExceeded(""), + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + @CrossSync.pytest + async def test_execute_query_with_retries( + self, client, execute_query_mock, prepare_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + DeadlineExceeded(""), + 
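# after each retryable error, the stream should be retried from the last received resume_token + 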
*chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + DeadlineExceeded(""), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(results) == 3 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"r1", b"r2"] + assert prepare_mock.call_count == 1 + + @pytest.mark.parametrize( + "exception", + [ + (core_exceptions.DeadlineExceeded("")), + (core_exceptions.Aborted("")), + (core_exceptions.ServiceUnavailable("")), + ], + ) + @CrossSync.pytest + async def test_execute_query_retryable_error( + self, client, execute_query_mock, prepare_mock, exception + ): + [res1, res2] = chunked_responses( + 2, str_val("test"), int_val(8), reset=True, token=b"t1" + ) + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1"), + exception, + *chunked_responses(1, str_val("tes2"), int_val(9), reset=True, token=b"t1"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 2 + assert execute_query_mock.call_count == 2 + assert prepare_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"t1"] + + @pytest.mark.parametrize( + "ExceptionType", + [ + (core_exceptions.InvalidArgument), + (core_exceptions.FailedPrecondition), + (core_exceptions.PermissionDenied), + (core_exceptions.MethodNotImplemented), + (core_exceptions.Cancelled), + (core_exceptions.AlreadyExists), + (core_exceptions.OutOfRange), + (core_exceptions.DataLoss), + (core_exceptions.Unauthenticated), + (core_exceptions.NotFound), + (core_exceptions.ResourceExhausted), + (core_exceptions.Unknown), + (core_exceptions.InternalServerError), + ], + ) + @CrossSync.pytest + async def test_execute_query_non_retryable( + self, client, execute_query_mock, prepare_mock, ExceptionType + ): + values = [ + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + ExceptionType(""), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + r = await CrossSync.next(result) + assert r["a"] == "test" + assert r["b"] == 8 + + with pytest.raises(ExceptionType): + r = await CrossSync.next(result) + + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + @pytest.mark.parametrize( + "retryable_exception", + [ + 
core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @CrossSync.pytest + async def test_prepare_query_retryable( + self, client, execute_query_mock, prepare_mock, retryable_exception + ): + prepare_mock.reset_mock() + prepare_mock.side_effect = [ + retryable_exception("test"), + prepare_response( + b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ), + ] + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 2 + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + (core_exceptions.InvalidArgument), + (core_exceptions.FailedPrecondition), + (core_exceptions.PermissionDenied), + (core_exceptions.MethodNotImplemented), + (core_exceptions.Cancelled), + (core_exceptions.AlreadyExists), + (core_exceptions.OutOfRange), + (core_exceptions.DataLoss), + (core_exceptions.Unauthenticated), + (core_exceptions.NotFound), + (core_exceptions.ResourceExhausted), + (core_exceptions.Unknown), + (core_exceptions.InternalServerError), + ], + ) + @CrossSync.pytest + async def test_prepare_query_non_retryable( + self, client, execute_query_mock, prepare_mock, non_retryable_exception + ): + prepare_mock.reset_mock() + prepare_mock.side_effect = [ + non_retryable_exception("test"), + prepare_response( + b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ), + ] + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + with pytest.raises(non_retryable_exception): + await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) diff --git a/tests/unit/data/_async/test_metrics_interceptor.py b/tests/unit/data/_async/test_metrics_interceptor.py new file mode 100644 index 000000000..1593b8c99 --- /dev/null +++ b/tests/unit/data/_async/test_metrics_interceptor.py @@ -0,0 +1,336 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import pytest
+from grpc import RpcError
+from grpc import ClientCallDetails
+
+from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric
+from google.cloud.bigtable.data._metrics.data_model import OperationState
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+# try/except added for compatibility with python < 3.8
+try:
+    from unittest import mock
+except ImportError:  # pragma: NO COVER
+    import mock  # type: ignore
+
+if CrossSync.is_async:
+    from google.cloud.bigtable.data._async.metrics_interceptor import (
+        AsyncBigtableMetricsInterceptor,
+    )
+else:
+    from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import (  # noqa: F401
+        BigtableMetricsInterceptor,
+    )
+
+
+__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_metrics_interceptor"
+
+
+@CrossSync.convert(replace_symbols={"__aiter__": "__iter__"})
+def _make_mock_stream_call(values, exc=None):
+    """
+    Create a mock call object that can be used for streaming calls
+    """
+    call = CrossSync.Mock()
+
+    async def gen():
+        for val in values:
+            yield val
+        if exc:
+            raise exc
+
+    call.__aiter__ = mock.Mock(return_value=gen())
+    return call
+
+
+@CrossSync.convert_class(sync_name="TestMetricsInterceptor")
+class TestMetricsInterceptorAsync:
+    @staticmethod
+    @CrossSync.convert(
+        replace_symbols={
+            "AsyncBigtableMetricsInterceptor": "BigtableMetricsInterceptor"
+        }
+    )
+    def _get_target_class():
+        return AsyncBigtableMetricsInterceptor
+
+    def _make_one(self, *args, **kwargs):
+        return self._get_target_class()(*args, **kwargs)
+
+    @CrossSync.pytest
+    async def test_unary_unary_interceptor_op_not_found(self):
+        """Test that the interceptor calls the continuation if the op is not found"""
+        instance = self._make_one()
+        continuation = CrossSync.Mock()
+        details = ClientCallDetails()
+        details.metadata = []
+        request = mock.Mock()
+        await instance.intercept_unary_unary(continuation, details, request)
+        continuation.assert_called_once_with(details, request)
+
+    @CrossSync.pytest
+    async def test_unary_unary_interceptor_success(self):
+        """Test that interceptor handles successful unary-unary calls"""
+        instance = self._make_one()
+        op = mock.Mock()
+        op.uuid = "test-uuid"
+        op.state = OperationState.ACTIVE_ATTEMPT
+        ActiveOperationMetric._active_operation_context.set(op)
+        continuation = CrossSync.Mock()
+        call = continuation.return_value
+        call.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+        call.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+        details = ClientCallDetails()
+        request = mock.Mock()
+        result = await instance.intercept_unary_unary(continuation, details, request)
+        assert result == call
+        continuation.assert_called_once_with(details, request)
+        op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+        op.end_attempt_with_status.assert_not_called()
+
+    @CrossSync.pytest
+    async def test_unary_unary_interceptor_failure(self):
+        """Test a failed call that raises RpcError with metadata attached"""
+        instance = self._make_one()
+        op = mock.Mock()
+        op.uuid = "test-uuid"
+        op.state = OperationState.ACTIVE_ATTEMPT
+        ActiveOperationMetric._active_operation_context.set(op)
+        exc = RpcError("test")
+        exc.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+        exc.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+        continuation = CrossSync.Mock(side_effect=exc)
+        details = ClientCallDetails()
+        request = mock.Mock()
+        with pytest.raises(RpcError) as e:
+            await instance.intercept_unary_unary(continuation, details, request)
+        assert e.value == exc
+        continuation.assert_called_once_with(details, request)
+        op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+
+    @CrossSync.pytest
+    async def test_unary_unary_interceptor_failure_no_metadata(self):
+        """Test a failed call that raises RpcError with no metadata attached"""
+        instance = self._make_one()
+        op = mock.Mock()
+        op.uuid = "test-uuid"
+        op.state = OperationState.ACTIVE_ATTEMPT
+        ActiveOperationMetric._active_operation_context.set(op)
+        exc = RpcError("test")
+        continuation = CrossSync.Mock(side_effect=exc)
+        call = continuation.return_value
+        call.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+        call.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+        details = ClientCallDetails()
+        request = mock.Mock()
+        with pytest.raises(RpcError) as e:
+            await instance.intercept_unary_unary(continuation, details, request)
+        assert e.value == exc
+        continuation.assert_called_once_with(details, request)
+        op.add_response_metadata.assert_not_called()
+
+    @CrossSync.pytest
+    async def test_unary_unary_interceptor_failure_generic(self):
+        """Test a failed call that raises a generic, non-RpcError exception"""
+        instance = self._make_one()
+        op = mock.Mock()
+        op.uuid = "test-uuid"
+        op.state = OperationState.ACTIVE_ATTEMPT
+        ActiveOperationMetric._active_operation_context.set(op)
+        exc = ValueError("test")
+        continuation = CrossSync.Mock(side_effect=exc)
+        call = continuation.return_value
+        call.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+        call.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+        details = ClientCallDetails()
+        request = mock.Mock()
+        with pytest.raises(ValueError) as e:
+            await instance.intercept_unary_unary(continuation, details, request)
+        assert e.value == exc
+        continuation.assert_called_once_with(details, request)
+        op.add_response_metadata.assert_not_called()
+
+    @CrossSync.pytest
+    async def test_unary_stream_interceptor_op_not_found(self):
+        """Test that the interceptor calls the continuation if the op is not found"""
+        instance = self._make_one()
+        continuation = CrossSync.Mock()
+        details = ClientCallDetails()
+        details.metadata = []
+        request = mock.Mock()
+        await instance.intercept_unary_stream(continuation, details, request)
+        continuation.assert_called_once_with(details, request)
+
+    @CrossSync.pytest
+    async def test_unary_stream_interceptor_success(self):
+        """Test that interceptor handles successful unary-stream calls"""
+        instance = self._make_one()
+        op = mock.Mock()
+        op.uuid = "test-uuid"
+        op.state = OperationState.ACTIVE_ATTEMPT
+        op.start_time_ns = 0
+        op.first_response_latency = None
+        ActiveOperationMetric._active_operation_context.set(op)
+
+        continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1, 2]))
+        call = continuation.return_value
+        call.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")])
+        call.initial_metadata = CrossSync.Mock(return_value=[("c", "d")])
+        details = ClientCallDetails()
+        request = mock.Mock()
+        wrapper = await instance.intercept_unary_stream(continuation, details, request)
+        results = [val async for val in wrapper]
+        assert results == [1, 2]
+        continuation.assert_called_once_with(details, request)
+        assert op.first_response_latency_ns is not None
+        op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"})
+        op.end_attempt_with_status.assert_not_called()
+
+    @CrossSync.pytest
+    async def test_unary_stream_interceptor_failure_mid_stream(self):
+        """Test that interceptor handles failures mid-stream"""
+        from grpc.aio import AioRpcError, Metadata
+
+        instance = self._make_one()
+        op =
mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + exc = AioRpcError(0, Metadata(), Metadata(("a", "b"), ("c", "d"))) + continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1], exc=exc)) + details = ClientCallDetails() + request = mock.Mock() + wrapper = await instance.intercept_unary_stream(continuation, details, request) + with pytest.raises(AioRpcError) as e: + [val async for val in wrapper] + assert e.value == exc + continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"}) + + @CrossSync.pytest + async def test_unary_stream_interceptor_failure_start_stream(self): + """Test that interceptor handles failures at start of stream with RpcError with metadata""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + exc = RpcError("test") + exc.trailing_metadata = CrossSync.Mock(return_value=[("a", "b")]) + exc.initial_metadata = CrossSync.Mock(return_value=[("c", "d")]) + + continuation = CrossSync.Mock() + continuation.side_effect = exc + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(RpcError) as e: + await instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"}) + + @CrossSync.pytest + async def test_unary_stream_interceptor_failure_start_stream_no_metadata(self): + """Test that interceptor handles failures at start of stream with RpcError with no metadata""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + exc = RpcError("test") + + continuation = CrossSync.Mock() + continuation.side_effect = exc + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(RpcError) as e: + await instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_not_called() + + @CrossSync.pytest + async def test_unary_stream_interceptor_failure_start_stream_generic(self): + """Test that interceptor handles failures at start of stream with generic exception""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + exc = ValueError("test") + + continuation = CrossSync.Mock() + continuation.side_effect = exc + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(ValueError) as e: + await instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_not_called() + + @CrossSync.pytest + 
@pytest.mark.parametrize( + "initial_state", [OperationState.CREATED, OperationState.BETWEEN_ATTEMPTS] + ) + async def test_unary_unary_interceptor_start_operation(self, initial_state): + """if called with a newly created operation, it should be started""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = initial_state + ActiveOperationMetric._active_operation_context.set(op) + continuation = CrossSync.Mock() + call = continuation.return_value + call.trailing_metadata = CrossSync.Mock(return_value=[]) + call.initial_metadata = CrossSync.Mock(return_value=[]) + details = ClientCallDetails() + request = mock.Mock() + await instance.intercept_unary_unary(continuation, details, request) + op.start_attempt.assert_called_once() + + @CrossSync.pytest + @pytest.mark.parametrize( + "initial_state", [OperationState.CREATED, OperationState.BETWEEN_ATTEMPTS] + ) + async def test_unary_stream_interceptor_start_operation(self, initial_state): + """if called with a newly created operation, it should be started""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = initial_state + ActiveOperationMetric._active_operation_context.set(op) + + continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1, 2])) + call = continuation.return_value + call.trailing_metadata = CrossSync.Mock(return_value=[]) + call.initial_metadata = CrossSync.Mock(return_value=[]) + details = ClientCallDetails() + request = mock.Mock() + await instance.intercept_unary_stream(continuation, details, request) + op.start_attempt.assert_called_once() diff --git a/tests/unit/data/_async/test_mutations_batcher.py b/tests/unit/data/_async/test_mutations_batcher.py new file mode 100644 index 000000000..b139f31f1 --- /dev/null +++ b/tests/unit/data/_async/test_mutations_batcher.py @@ -0,0 +1,1239 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
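+# For orientation: the Test_FlowControl cases below encode a capacity rule that
+# behaves roughly like this sketch (a simplified model, not the real
+# implementation): a batch fits if it stays under both in-flight limits, and an
+# entry larger than a limit on its own is only admitted when it can be sent alone.
+#
+#     def has_capacity(new_count, new_size, in_count, in_size, max_count, max_size):
+#         # cap oversized entries at the limit so they can still proceed
+#         # once nothing else is in flight
+#         acceptable_count = min(new_count, max_count)
+#         acceptable_size = min(new_size, max_size)
+#         return (
+#             in_count + acceptable_count <= max_count
+#             and in_size + acceptable_size <= max_size
+#         )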
+
+import pytest
+import mock
+import asyncio
+import time
+import google.api_core.exceptions as core_exceptions
+import google.api_core.retry
+from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete
+from google.cloud.bigtable.data.mutations import RowMutationEntry
+from google.cloud.bigtable.data.mutations import DeleteAllFromRow
+from google.cloud.bigtable.data import TABLE_DEFAULT
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_mutations_batcher"
+
+
+@CrossSync.convert_class(sync_name="Test_FlowControl")
+class Test_FlowControlAsync:
+    @staticmethod
+    @CrossSync.convert
+    def _target_class():
+        return CrossSync._FlowControl
+
+    def _make_one(self, max_mutation_count=10, max_mutation_bytes=100):
+        return self._target_class()(max_mutation_count, max_mutation_bytes)
+
+    @staticmethod
+    def _make_mutation(count=1, size=1):
+        mutation = RowMutationEntry("k", DeleteAllFromRow())
+        mutation.mutations = [DeleteAllFromRow() for _ in range(count)]
+        mutation.size = lambda: size
+        return mutation
+
+    def test_ctor(self):
+        max_mutation_count = 9
+        max_mutation_bytes = 19
+        instance = self._make_one(max_mutation_count, max_mutation_bytes)
+        assert instance._max_mutation_count == max_mutation_count
+        assert instance._max_mutation_bytes == max_mutation_bytes
+        assert instance._in_flight_mutation_count == 0
+        assert instance._in_flight_mutation_bytes == 0
+        assert isinstance(instance._capacity_condition, CrossSync.Condition)
+
+    def test_ctor_invalid_values(self):
+        """Test that values are positive, and fit within expected limits"""
+        with pytest.raises(ValueError) as e:
+            self._make_one(0, 1)
+        assert "max_mutation_count must be greater than 0" in str(e.value)
+        with pytest.raises(ValueError) as e:
+            self._make_one(1, 0)
+        assert "max_mutation_bytes must be greater than 0" in str(e.value)
+
+    @pytest.mark.parametrize(
+        "max_count,max_size,existing_count,existing_size,new_count,new_size,expected",
+        [
+            (1, 1, 0, 0, 0, 0, True),
+            (1, 1, 1, 1, 1, 1, False),
+            (10, 10, 0, 0, 0, 0, True),
+            (10, 10, 0, 0, 9, 9, True),
+            (10, 10, 0, 0, 11, 9, True),
+            (10, 10, 0, 1, 11, 9, True),
+            (10, 10, 1, 0, 11, 9, False),
+            (10, 10, 0, 0, 9, 11, True),
+            (10, 10, 1, 0, 9, 11, True),
+            (10, 10, 0, 1, 9, 11, False),
+            (10, 1, 0, 0, 1, 0, True),
+            (1, 10, 0, 0, 0, 8, True),
+            (float("inf"), float("inf"), 0, 0, 1e10, 1e10, True),
+            (8, 8, 0, 0, 1e10, 1e10, True),
+            (12, 12, 6, 6, 5, 5, True),
+            (12, 12, 5, 5, 6, 6, True),
+            (12, 12, 6, 6, 6, 6, True),
+            (12, 12, 6, 6, 7, 7, False),
+            # requests where new_count or new_size exceeds the limits on their
+            # own are still allowed through when nothing is in flight
+            (12, 12, 0, 0, 13, 13, True),
+            (12, 12, 12, 0, 0, 13, True),
+            (12, 12, 0, 12, 13, 0, True),
+            # but not if there are already values in flight
+            (12, 12, 1, 1, 13, 13, False),
+            (12, 12, 1, 1, 0, 13, False),
+            (12, 12, 1, 1, 13, 0, False),
+        ],
+    )
+    def test__has_capacity(
+        self,
+        max_count,
+        max_size,
+        existing_count,
+        existing_size,
+        new_count,
+        new_size,
+        expected,
+    ):
+        """
+        _has_capacity should return True if the new mutation will not exceed the max count or size
+        """
+        instance = self._make_one(max_count, max_size)
+        instance._in_flight_mutation_count = existing_count
+        instance._in_flight_mutation_bytes = existing_size
+        assert instance._has_capacity(new_count, new_size) == expected
+
+    @CrossSync.pytest
+    @pytest.mark.parametrize(
+        "existing_count,existing_size,added_count,added_size,new_count,new_size",
+        [
+            (0, 0, 0, 0, 0, 0),
+            (2, 2, 1, 1, 1, 1),
+            (2, 0, 1, 0, 1, 0),
+            (0, 2, 0, 1, 0, 1),
+            (10, 10, 0, 0, 10, 10),
+            (10, 10, 5, 5, 5, 5),
+            (0, 0, 1, 1, -1, -1),
+        ],
+    )
+    async def test_remove_from_flow_value_update(
+        self,
+        existing_count,
+        existing_size,
+        added_count,
+        added_size,
+        new_count,
+        new_size,
+    ):
+        """
+        completed mutations should lower the inflight values
+        """
+        instance = self._make_one()
+        instance._in_flight_mutation_count = existing_count
+        instance._in_flight_mutation_bytes = existing_size
+        mutation = self._make_mutation(added_count, added_size)
+        await instance.remove_from_flow(mutation)
+        assert instance._in_flight_mutation_count == new_count
+        assert instance._in_flight_mutation_bytes == new_size
+
+    @CrossSync.pytest
+    async def test__remove_from_flow_unlock(self):
+        """capacity condition should notify after mutation is complete"""
+        instance = self._make_one(10, 10)
+        instance._in_flight_mutation_count = 10
+        instance._in_flight_mutation_bytes = 10
+
+        async def task_routine():
+            async with instance._capacity_condition:
+                await instance._capacity_condition.wait_for(
+                    lambda: instance._has_capacity(1, 1)
+                )
+
+        if CrossSync.is_async:
+            # for async class, build task to test flow unlock
+            task = asyncio.create_task(task_routine())
+
+            def task_alive():
+                return not task.done()
+
+        else:
+            # this branch will be tested in sync version of this test
+            import threading
+
+            thread = threading.Thread(target=task_routine)
+            thread.start()
+            task_alive = thread.is_alive
+        await CrossSync.sleep(0.05)
+        # should be blocked due to capacity
+        assert task_alive() is True
+        # try changing size
+        mutation = self._make_mutation(count=0, size=5)
+
+        await instance.remove_from_flow([mutation])
+        await CrossSync.sleep(0.05)
+        assert instance._in_flight_mutation_count == 10
+        assert instance._in_flight_mutation_bytes == 5
+        assert task_alive() is True
+        # try changing count
+        instance._in_flight_mutation_bytes = 10
+        mutation = self._make_mutation(count=5, size=0)
+        await instance.remove_from_flow([mutation])
+        await CrossSync.sleep(0.05)
+        assert instance._in_flight_mutation_count == 5
+        assert instance._in_flight_mutation_bytes == 10
+        assert task_alive() is True
+        # try changing both
+        instance._in_flight_mutation_count = 10
+        mutation = self._make_mutation(count=5, size=5)
+        await instance.remove_from_flow([mutation])
+        await CrossSync.sleep(0.05)
+        assert instance._in_flight_mutation_count == 5
+        assert instance._in_flight_mutation_bytes == 5
+        # task should be complete
+        assert task_alive() is False
+
+    @CrossSync.pytest
+    @pytest.mark.parametrize(
+        "mutations,count_cap,size_cap,expected_results",
+        [
+            # high capacity results in no batching
+            ([(5, 5), (1, 1), (1, 1)], 10, 10, [[(5, 5), (1, 1), (1, 1)]]),
+            # low capacity splits up into batches
+            ([(1, 1), (1, 1), (1, 1)], 1, 1, [[(1, 1)], [(1, 1)], [(1, 1)]]),
+            # test count as limiting factor
+            ([(1, 1), (1, 1), (1, 1)], 2, 10, [[(1, 1), (1, 1)], [(1, 1)]]),
+            # test size as limiting factor
+            ([(1, 1), (1, 1), (1, 1)], 10, 2, [[(1, 1), (1, 1)], [(1, 1)]]),
+            # test with some blockages and some flows
+            (
+                [(1, 1), (5, 5), (4, 1), (1, 4), (1, 1)],
+                5,
+                5,
+                [[(1, 1)], [(5, 5)], [(4, 1), (1, 4)], [(1, 1)]],
+            ),
+        ],
+    )
+    async def test_add_to_flow(self, mutations, count_cap, size_cap, expected_results):
+        """
+        Test batching with various flow control settings
+        """
+        mutation_objs = [self._make_mutation(count=m[0], size=m[1]) for m in mutations]
+        instance = self._make_one(count_cap, size_cap)
+        i = 0
+        async for batch in instance.add_to_flow(mutation_objs):
+ expected_batch = expected_results[i] + assert len(batch) == len(expected_batch) + for j in range(len(expected_batch)): + # check counts + assert len(batch[j].mutations) == expected_batch[j][0] + # check sizes + assert batch[j].size() == expected_batch[j][1] + # update lock + await instance.remove_from_flow(batch) + i += 1 + assert i == len(expected_results) + + @CrossSync.pytest + @pytest.mark.parametrize( + "mutations,max_limit,expected_results", + [ + ([(1, 1)] * 11, 10, [[(1, 1)] * 10, [(1, 1)]]), + ([(1, 1)] * 10, 1, [[(1, 1)] for _ in range(10)]), + ([(1, 1)] * 10, 2, [[(1, 1), (1, 1)] for _ in range(5)]), + ], + ) + async def test_add_to_flow_max_mutation_limits( + self, mutations, max_limit, expected_results + ): + """ + Test flow control running up against the max API limit + Should submit request early, even if the flow control has room for more + """ + subpath = "_async" if CrossSync.is_async else "_sync_autogen" + path = f"google.cloud.bigtable.data.{subpath}.mutations_batcher._MUTATE_ROWS_REQUEST_MUTATION_LIMIT" + with mock.patch(path, max_limit): + mutation_objs = [ + self._make_mutation(count=m[0], size=m[1]) for m in mutations + ] + # flow control has no limits except API restrictions + instance = self._make_one(float("inf"), float("inf")) + i = 0 + async for batch in instance.add_to_flow(mutation_objs): + expected_batch = expected_results[i] + assert len(batch) == len(expected_batch) + for j in range(len(expected_batch)): + # check counts + assert len(batch[j].mutations) == expected_batch[j][0] + # check sizes + assert batch[j].size() == expected_batch[j][1] + # update lock + await instance.remove_from_flow(batch) + i += 1 + assert i == len(expected_results) + + @CrossSync.pytest + async def test_add_to_flow_oversize(self): + """ + mutations over the flow control limits should still be accepted + """ + instance = self._make_one(2, 3) + large_size_mutation = self._make_mutation(count=1, size=10) + large_count_mutation = self._make_mutation(count=10, size=1) + results = [out async for out in instance.add_to_flow([large_size_mutation])] + assert len(results) == 1 + await instance.remove_from_flow(results[0]) + count_results = [ + out async for out in instance.add_to_flow(large_count_mutation) + ] + assert len(count_results) == 1 + + +@CrossSync.convert_class(sync_name="TestMutationsBatcher") +class TestMutationsBatcherAsync: + @CrossSync.convert + def _get_target_class(self): + return CrossSync.MutationsBatcher + + def _make_one(self, table=None, **kwargs): + from google.api_core.exceptions import DeadlineExceeded + from google.api_core.exceptions import ServiceUnavailable + + if table is None: + table = mock.Mock() + table._request_path = {"table_name": "table"} + table.app_profile_id = None + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 10 + table.default_mutate_rows_retryable_errors = ( + DeadlineExceeded, + ServiceUnavailable, + ) + + return self._get_target_class()(table, **kwargs) + + @staticmethod + def _make_mutation(count=1, size=1): + mutation = RowMutationEntry("k", DeleteAllFromRow()) + mutation.size = lambda: size + mutation.mutations = [DeleteAllFromRow() for _ in range(count)] + return mutation + + @CrossSync.pytest + async def test_ctor_defaults(self): + with mock.patch.object( + self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() + ) as flush_timer_mock: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + 
table.default_mutate_rows_retryable_errors = [Exception] + async with self._make_one(table) as instance: + assert instance._target == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._max_mutation_count == 100000 + assert instance._flow_control._max_mutation_bytes == 104857600 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert ( + instance._operation_timeout + == table.default_mutate_rows_operation_timeout + ) + assert ( + instance._attempt_timeout + == table.default_mutate_rows_attempt_timeout + ) + assert ( + instance._retryable_errors + == table.default_mutate_rows_retryable_errors + ) + await CrossSync.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == 5 + assert isinstance(instance._flush_timer, CrossSync.Future) + + @CrossSync.pytest + async def test_ctor_explicit(self): + """Test with explicit parameters""" + with mock.patch.object( + self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() + ) as flush_timer_mock: + table = mock.Mock() + flush_interval = 20 + flush_limit_count = 17 + flush_limit_bytes = 19 + flow_control_max_mutation_count = 1001 + flow_control_max_bytes = 12 + operation_timeout = 11 + attempt_timeout = 2 + retryable_errors = [Exception] + async with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + flow_control_max_mutation_count=flow_control_max_mutation_count, + flow_control_max_bytes=flow_control_max_bytes, + batch_operation_timeout=operation_timeout, + batch_attempt_timeout=attempt_timeout, + batch_retryable_errors=retryable_errors, + ) as instance: + assert instance._target == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert ( + instance._flow_control._max_mutation_count + == flow_control_max_mutation_count + ) + assert ( + instance._flow_control._max_mutation_bytes == flow_control_max_bytes + ) + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert instance._operation_timeout == operation_timeout + assert instance._attempt_timeout == attempt_timeout + assert instance._retryable_errors == retryable_errors + await CrossSync.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == flush_interval + assert isinstance(instance._flush_timer, CrossSync.Future) + + @CrossSync.pytest + async def test_ctor_no_flush_limits(self): + """Test with None for flush limits""" + with mock.patch.object( + self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() + ) as flush_timer_mock: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout 
= 8 + table.default_mutate_rows_retryable_errors = () + flush_interval = None + flush_limit_count = None + flush_limit_bytes = None + async with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + ) as instance: + assert instance._target == table + assert instance.closed is False + assert instance._staged_entries == [] + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + await CrossSync.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] is None + assert isinstance(instance._flush_timer, CrossSync.Future) + + @CrossSync.pytest + async def test_ctor_invalid_values(self): + """Test that timeout values are positive, and fit within expected limits""" + with pytest.raises(ValueError) as e: + self._make_one(batch_operation_timeout=-1) + assert "operation_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._make_one(batch_attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + + @CrossSync.convert + def test_default_argument_consistency(self): + """ + We supply default arguments in MutationsBatcherAsync.__init__, and in + table.mutations_batcher. Make sure any changes to defaults are applied to + both places + """ + import inspect + + get_batcher_signature = dict( + inspect.signature(CrossSync.Table.mutations_batcher).parameters + ) + get_batcher_signature.pop("self") + batcher_init_signature = dict( + inspect.signature(self._get_target_class()).parameters + ) + batcher_init_signature.pop("table") + # both should have same number of arguments + assert len(get_batcher_signature.keys()) == len(batcher_init_signature.keys()) + assert len(get_batcher_signature) == 8 # update if expected params change + # both should have same argument names + assert set(get_batcher_signature.keys()) == set(batcher_init_signature.keys()) + # both should have same default values + for arg_name in get_batcher_signature.keys(): + assert ( + get_batcher_signature[arg_name].default + == batcher_init_signature[arg_name].default + ) + + @CrossSync.pytest + @pytest.mark.parametrize("input_val", [None, 0, -1]) + async def test__start_flush_timer_w_empty_input(self, input_val): + """Empty/invalid timer should return immediately""" + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + # mock different method depending on sync vs async + async with self._make_one() as instance: + if CrossSync.is_async: + sleep_obj, sleep_method = asyncio, "wait_for" + else: + sleep_obj, sleep_method = instance._closed, "wait" + with mock.patch.object(sleep_obj, sleep_method) as sleep_mock: + result = await instance._timer_routine(input_val) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 + assert result is None + + @CrossSync.pytest + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + async def test__start_flush_timer_call_when_closed( + self, + ): + """closed batcher's timer should return immediately""" + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + async with self._make_one() as instance: + 
await instance.close()
+                flush_mock.reset_mock()
+                # mock different method depending on sync vs async
+                if CrossSync.is_async:
+                    sleep_obj, sleep_method = asyncio, "wait_for"
+                else:
+                    sleep_obj, sleep_method = instance._closed, "wait"
+                with mock.patch.object(sleep_obj, sleep_method) as sleep_mock:
+                    await instance._timer_routine(10)
+                    assert sleep_mock.call_count == 0
+                    assert flush_mock.call_count == 0
+
+    @CrossSync.pytest
+    @pytest.mark.parametrize("num_staged", [0, 1, 10])
+    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
+    async def test__flush_timer(self, num_staged):
+        """Timer should continue to call _schedule_flush in a loop"""
+        from google.cloud.bigtable.data._cross_sync import CrossSync
+
+        with mock.patch.object(
+            self._get_target_class(), "_schedule_flush"
+        ) as flush_mock:
+            expected_sleep = 12
+            async with self._make_one(flush_interval=expected_sleep) as instance:
+                loop_num = 3
+                instance._staged_entries = [mock.Mock()] * num_staged
+                with mock.patch.object(CrossSync, "event_wait") as sleep_mock:
+                    sleep_mock.side_effect = [None] * loop_num + [TabError("expected")]
+                    with pytest.raises(TabError):
+                        await self._get_target_class()._timer_routine(
+                            instance, expected_sleep
+                        )
+                    if CrossSync.is_async:
+                        # replace with a no-op so there are no issues on close
+                        instance._flush_timer = CrossSync.Future()
+                    assert sleep_mock.call_count == loop_num + 1
+                    sleep_kwargs = sleep_mock.call_args[1]
+                    assert sleep_kwargs["timeout"] == expected_sleep
+                    assert flush_mock.call_count == (0 if num_staged == 0 else loop_num)
+
+    @CrossSync.pytest
+    async def test__flush_timer_close(self):
+        """Timer should terminate after close"""
+        with mock.patch.object(self._get_target_class(), "_schedule_flush"):
+            async with self._make_one() as instance:
+                # let task run in background
+                assert instance._flush_timer.done() is False
+                # close the batcher
+                await instance.close()
+                # task should be complete
+                assert instance._flush_timer.done() is True
+
+    @CrossSync.pytest
+    async def test_append_closed(self):
+        """Appending to a closed batcher should raise an exception"""
+        instance = self._make_one()
+        await instance.close()
+        with pytest.raises(RuntimeError):
+            await instance.append(mock.Mock())
+
+    @CrossSync.pytest
+    async def test_append_wrong_mutation(self):
+        """
+        Appending plain Mutation objects should raise an exception;
+        only RowMutationEntry is supported by the batcher
+        """
+        from google.cloud.bigtable.data.mutations import DeleteAllFromRow
+
+        async with self._make_one() as instance:
+            expected_error = "invalid mutation type: DeleteAllFromRow. 
Only RowMutationEntry objects are supported by batcher" + with pytest.raises(ValueError) as e: + await instance.append(DeleteAllFromRow()) + assert str(e.value) == expected_error + + @CrossSync.pytest + async def test_append_outside_flow_limits(self): + """entries larger than mutation limits are still processed""" + async with self._make_one( + flow_control_max_mutation_count=1, flow_control_max_bytes=1 + ) as instance: + oversized_entry = self._make_mutation(count=0, size=2) + await instance.append(oversized_entry) + assert instance._staged_entries == [oversized_entry] + assert instance._staged_count == 0 + assert instance._staged_bytes == 2 + instance._staged_entries = [] + async with self._make_one( + flow_control_max_mutation_count=1, flow_control_max_bytes=1 + ) as instance: + overcount_entry = self._make_mutation(count=2, size=0) + await instance.append(overcount_entry) + assert instance._staged_entries == [overcount_entry] + assert instance._staged_count == 2 + assert instance._staged_bytes == 0 + instance._staged_entries = [] + + @CrossSync.pytest + async def test_append_flush_runs_after_limit_hit(self): + """ + If the user appends a bunch of entries above the flush limits back-to-back, + it should still flush in a single task + """ + with mock.patch.object( + self._get_target_class(), "_execute_mutate_rows" + ) as op_mock: + async with self._make_one(flush_limit_bytes=100) as instance: + # mock network calls + async def mock_call(*args, **kwargs): + return [] + + op_mock.side_effect = mock_call + # append a mutation just under the size limit + await instance.append(self._make_mutation(size=99)) + # append a bunch of entries back-to-back in a loop + num_entries = 10 + for _ in range(num_entries): + await instance.append(self._make_mutation(size=1)) + # let any flush jobs finish + await instance._wait_for_batch_results(*instance._flush_jobs) + # should have only flushed once, with large mutation and first mutation in loop + assert op_mock.call_count == 1 + sent_batch = op_mock.call_args[0][0] + assert len(sent_batch) == 2 + # others should still be pending + assert len(instance._staged_entries) == num_entries - 1 + + @pytest.mark.parametrize( + "flush_count,flush_bytes,mutation_count,mutation_bytes,expect_flush", + [ + (10, 10, 1, 1, False), + (10, 10, 9, 9, False), + (10, 10, 10, 1, True), + (10, 10, 1, 10, True), + (10, 10, 10, 10, True), + (1, 1, 10, 10, True), + (1, 1, 0, 0, False), + ], + ) + @CrossSync.pytest + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + async def test_append( + self, flush_count, flush_bytes, mutation_count, mutation_bytes, expect_flush + ): + """test appending different mutations, and checking if it causes a flush""" + async with self._make_one( + flush_limit_mutation_count=flush_count, flush_limit_bytes=flush_bytes + ) as instance: + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert instance._staged_entries == [] + mutation = self._make_mutation(count=mutation_count, size=mutation_bytes) + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + await instance.append(mutation) + assert flush_mock.call_count == bool(expect_flush) + assert instance._staged_count == mutation_count + assert instance._staged_bytes == mutation_bytes + assert instance._staged_entries == [mutation] + instance._staged_entries = [] + + @CrossSync.pytest + async def test_append_multiple_sequentially(self): + """Append multiple mutations""" + async with self._make_one( + flush_limit_mutation_count=8, flush_limit_bytes=8 + ) as 
instance: + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert instance._staged_entries == [] + mutation = self._make_mutation(count=2, size=3) + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + await instance.append(mutation) + assert flush_mock.call_count == 0 + assert instance._staged_count == 2 + assert instance._staged_bytes == 3 + assert len(instance._staged_entries) == 1 + await instance.append(mutation) + assert flush_mock.call_count == 0 + assert instance._staged_count == 4 + assert instance._staged_bytes == 6 + assert len(instance._staged_entries) == 2 + await instance.append(mutation) + assert flush_mock.call_count == 1 + assert instance._staged_count == 6 + assert instance._staged_bytes == 9 + assert len(instance._staged_entries) == 3 + instance._staged_entries = [] + + @CrossSync.pytest + async def test_flush_flow_control_concurrent_requests(self): + """ + requests should happen in parallel if flow control breaks up single flush into batches + """ + import time + + num_calls = 10 + fake_mutations = [self._make_mutation(count=1) for _ in range(num_calls)] + async with self._make_one(flow_control_max_mutation_count=1) as instance: + with mock.patch.object( + instance, "_execute_mutate_rows", CrossSync.Mock() + ) as op_mock: + # mock network calls + async def mock_call(*args, **kwargs): + await CrossSync.sleep(0.1) + return [] + + op_mock.side_effect = mock_call + start_time = time.monotonic() + # flush one large batch, that will be broken up into smaller batches + instance._staged_entries = fake_mutations + instance._schedule_flush() + await CrossSync.sleep(0.01) + # make room for new mutations + for i in range(num_calls): + await instance._flow_control.remove_from_flow( + [self._make_mutation(count=1)] + ) + await CrossSync.sleep(0.01) + # allow flushes to complete + await instance._wait_for_batch_results(*instance._flush_jobs) + duration = time.monotonic() - start_time + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + # if flushes were sequential, total duration would be 1s + assert duration < 0.5 + assert op_mock.call_count == num_calls + + @CrossSync.pytest + async def test_schedule_flush_no_mutations(self): + """schedule flush should return None if no staged mutations""" + async with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal") as flush_mock: + for i in range(3): + assert instance._schedule_flush() is None + assert flush_mock.call_count == 0 + + @CrossSync.pytest + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + async def test_schedule_flush_with_mutations(self): + """if new mutations exist, should add a new flush task to _flush_jobs""" + async with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal") as flush_mock: + if not CrossSync.is_async: + # simulate operation + flush_mock.side_effect = lambda x: time.sleep(0.1) + for i in range(1, 4): + mutation = mock.Mock() + instance._staged_entries = [mutation] + instance._schedule_flush() + assert instance._staged_entries == [] + # let flush task run + await asyncio.sleep(0) + assert instance._staged_entries == [] + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert flush_mock.call_count == 1 + flush_mock.reset_mock() + + @CrossSync.pytest + async def test__flush_internal(self): + """ + _flush_internal should: + - await previous flush call + - delegate batching to _flow_control + - call _execute_mutate_rows on each batch + - 
update self.exceptions and self._entries_processed_since_last_raise + """ + num_entries = 10 + async with self._make_one() as instance: + with mock.patch.object(instance, "_execute_mutate_rows") as execute_mock: + with mock.patch.object( + instance._flow_control, "add_to_flow" + ) as flow_mock: + # mock flow control to always return a single batch + async def gen(x): + yield x + + flow_mock.side_effect = lambda x: gen(x) + mutations = [self._make_mutation(count=1, size=1)] * num_entries + await instance._flush_internal(mutations) + assert instance._entries_processed_since_last_raise == num_entries + assert execute_mock.call_count == 1 + assert flow_mock.call_count == 1 + instance._oldest_exceptions.clear() + instance._newest_exceptions.clear() + + @CrossSync.pytest + async def test_flush_clears_job_list(self): + """ + a job should be added to _flush_jobs when _schedule_flush is called, + and removed when it completes + """ + async with self._make_one() as instance: + with mock.patch.object( + instance, "_flush_internal", CrossSync.Mock() + ) as flush_mock: + if not CrossSync.is_async: + # simulate operation + flush_mock.side_effect = lambda x: time.sleep(0.1) + mutations = [self._make_mutation(count=1, size=1)] + instance._staged_entries = mutations + assert instance._flush_jobs == set() + new_job = instance._schedule_flush() + assert instance._flush_jobs == {new_job} + if CrossSync.is_async: + await new_job + else: + new_job.result() + assert instance._flush_jobs == set() + + @pytest.mark.parametrize( + "num_starting,num_new_errors,expected_total_errors", + [ + (0, 0, 0), + (0, 1, 1), + (0, 2, 2), + (1, 0, 1), + (1, 1, 2), + (10, 2, 12), + (10, 20, 20), # should cap at 20 + ], + ) + @CrossSync.pytest + async def test__flush_internal_with_errors( + self, num_starting, num_new_errors, expected_total_errors + ): + """ + errors returned from _execute_mutate_rows should be added to internal exceptions + """ + from google.cloud.bigtable.data import exceptions + + num_entries = 10 + expected_errors = [ + exceptions.FailedMutationEntryError(mock.Mock(), mock.Mock(), ValueError()) + ] * num_new_errors + async with self._make_one() as instance: + instance._oldest_exceptions = [mock.Mock()] * num_starting + with mock.patch.object(instance, "_execute_mutate_rows") as execute_mock: + execute_mock.return_value = expected_errors + with mock.patch.object( + instance._flow_control, "add_to_flow" + ) as flow_mock: + # mock flow control to always return a single batch + async def gen(x): + yield x + + flow_mock.side_effect = lambda x: gen(x) + mutations = [self._make_mutation(count=1, size=1)] * num_entries + await instance._flush_internal(mutations) + assert instance._entries_processed_since_last_raise == num_entries + assert execute_mock.call_count == 1 + assert flow_mock.call_count == 1 + found_exceptions = instance._oldest_exceptions + list( + instance._newest_exceptions + ) + assert len(found_exceptions) == expected_total_errors + for i in range(num_starting, expected_total_errors): + assert found_exceptions[i] == expected_errors[i - num_starting] + # errors should have index stripped + assert found_exceptions[i].index is None + # clear out exceptions + instance._oldest_exceptions.clear() + instance._newest_exceptions.clear() + + @CrossSync.convert + async def _mock_gapic_return(self, num=5): + from google.cloud.bigtable_v2.types import MutateRowsResponse + from google.rpc import status_pb2 + + @CrossSync.convert + async def gen(num): + for i in range(num): + entry = MutateRowsResponse.Entry( + 
index=i, status=status_pb2.Status(code=0)
+                )
+                yield MutateRowsResponse(entries=[entry])
+
+        return gen(num)
+
+    @CrossSync.pytest
+    async def test_timer_flush_end_to_end(self):
+        """Flush should automatically trigger after flush_interval"""
+        num_mutations = 10
+        mutations = [self._make_mutation(count=2, size=2)] * num_mutations
+
+        async with self._make_one(flush_interval=0.05) as instance:
+            instance._target.default_operation_timeout = 10
+            instance._target.default_attempt_timeout = 9
+            with mock.patch.object(
+                instance._target.client._gapic_client, "mutate_rows"
+            ) as gapic_mock:
+                gapic_mock.side_effect = (
+                    lambda *args, **kwargs: self._mock_gapic_return(num_mutations)
+                )
+                for m in mutations:
+                    await instance.append(m)
+                assert instance._entries_processed_since_last_raise == 0
+                # let flush trigger due to timer
+                await CrossSync.sleep(0.1)
+                assert instance._entries_processed_since_last_raise == num_mutations
+
+    @CrossSync.pytest
+    async def test__execute_mutate_rows(self):
+        with mock.patch.object(CrossSync, "_MutateRowsOperation") as mutate_rows:
+            mutate_rows.return_value = CrossSync.Mock()
+            start_operation = mutate_rows().start
+            table = mock.Mock()
+            table.table_name = "test-table"
+            table.app_profile_id = "test-app-profile"
+            table.default_mutate_rows_operation_timeout = 17
+            table.default_mutate_rows_attempt_timeout = 13
+            table.default_mutate_rows_retryable_errors = ()
+            async with self._make_one(table) as instance:
+                batch = [self._make_mutation()]
+                result = await instance._execute_mutate_rows(batch)
+                assert start_operation.call_count == 1
+                args, kwargs = mutate_rows.call_args
+                assert args[0] == table.client._gapic_client
+                assert args[1] == table
+                assert args[2] == batch
+                assert kwargs["operation_timeout"] == 17
+                assert kwargs["attempt_timeout"] == 13
+                assert result == []
+
+    @CrossSync.pytest
+    async def test__execute_mutate_rows_returns_errors(self):
+        """Errors from the operation should be returned as a list"""
+        from google.cloud.bigtable.data.exceptions import (
+            MutationsExceptionGroup,
+            FailedMutationEntryError,
+        )
+
+        with mock.patch.object(CrossSync._MutateRowsOperation, "start") as mutate_rows:
+            err1 = FailedMutationEntryError(0, mock.Mock(), RuntimeError("test error"))
+            err2 = FailedMutationEntryError(1, mock.Mock(), RuntimeError("test error"))
+            mutate_rows.side_effect = MutationsExceptionGroup([err1, err2], 10)
+            table = mock.Mock()
+            table.default_mutate_rows_operation_timeout = 17
+            table.default_mutate_rows_attempt_timeout = 13
+            table.default_mutate_rows_retryable_errors = ()
+            async with self._make_one(table) as instance:
+                batch = [self._make_mutation()]
+                result = await instance._execute_mutate_rows(batch)
+                assert len(result) == 2
+                assert result[0] == err1
+                assert result[1] == err2
+                # indices should be set to None
+                assert result[0].index is None
+                assert result[1].index is None
+
+    @CrossSync.pytest
+    async def test__raise_exceptions(self):
+        """Raise exceptions and reset error state"""
+        from google.cloud.bigtable.data import exceptions
+
+        expected_total = 1201
+        expected_exceptions = [RuntimeError("mock")] * 3
+        async with self._make_one() as instance:
+            instance._oldest_exceptions = expected_exceptions
+            instance._entries_processed_since_last_raise = expected_total
+            try:
+                instance._raise_exceptions()
+            except exceptions.MutationsExceptionGroup as exc:
+                assert list(exc.exceptions) == expected_exceptions
+                assert str(expected_total) in str(exc)
+            assert instance._entries_processed_since_last_raise == 0
+            instance._oldest_exceptions, 
instance._newest_exceptions = ([], []) + # try calling again + instance._raise_exceptions() + + @CrossSync.pytest + @CrossSync.convert( + sync_name="test___enter__", replace_symbols={"__aenter__": "__enter__"} + ) + async def test___aenter__(self): + """Should return self""" + async with self._make_one() as instance: + assert await instance.__aenter__() == instance + + @CrossSync.pytest + @CrossSync.convert( + sync_name="test___exit__", replace_symbols={"__aexit__": "__exit__"} + ) + async def test___aexit__(self): + """aexit should call close""" + async with self._make_one() as instance: + with mock.patch.object(instance, "close") as close_mock: + await instance.__aexit__(None, None, None) + assert close_mock.call_count == 1 + + @CrossSync.pytest + async def test_close(self): + """Should clean up all resources""" + async with self._make_one() as instance: + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + with mock.patch.object(instance, "_raise_exceptions") as raise_mock: + await instance.close() + assert instance.closed is True + assert instance._flush_timer.done() is True + assert instance._flush_jobs == set() + assert flush_mock.call_count == 1 + assert raise_mock.call_count == 1 + + @CrossSync.pytest + async def test_close_w_exceptions(self): + """Raise exceptions on close""" + from google.cloud.bigtable.data import exceptions + + expected_total = 10 + expected_exceptions = [RuntimeError("mock")] + async with self._make_one() as instance: + instance._oldest_exceptions = expected_exceptions + instance._entries_processed_since_last_raise = expected_total + try: + await instance.close() + except exceptions.MutationsExceptionGroup as exc: + assert list(exc.exceptions) == expected_exceptions + assert str(expected_total) in str(exc) + assert instance._entries_processed_since_last_raise == 0 + # clear out exceptions + instance._oldest_exceptions, instance._newest_exceptions = ([], []) + + @CrossSync.pytest + async def test__on_exit(self, recwarn): + """Should raise warnings if unflushed mutations exist""" + async with self._make_one() as instance: + # calling without mutations is noop + instance._on_exit() + assert len(recwarn) == 0 + # calling with existing mutations should raise warning + num_left = 4 + instance._staged_entries = [mock.Mock()] * num_left + with pytest.warns(UserWarning) as w: + instance._on_exit() + assert len(w) == 1 + assert "unflushed mutations" in str(w[0].message).lower() + assert str(num_left) in str(w[0].message) + # calling while closed is noop + instance._closed.set() + instance._on_exit() + assert len(recwarn) == 0 + # reset staged mutations for cleanup + instance._staged_entries = [] + + @CrossSync.pytest + async def test_atexit_registration(self): + """Should run _on_exit on program termination""" + import atexit + + with mock.patch.object(atexit, "register") as register_mock: + assert register_mock.call_count == 0 + async with self._make_one(): + assert register_mock.call_count == 1 + + @CrossSync.pytest + async def test_timeout_args_passed(self): + """ + batch_operation_timeout and batch_attempt_timeout should be used + in api calls + """ + with mock.patch.object( + CrossSync, "_MutateRowsOperation", return_value=CrossSync.Mock() + ) as mutate_rows: + expected_operation_timeout = 17 + expected_attempt_timeout = 13 + async with self._make_one( + batch_operation_timeout=expected_operation_timeout, + batch_attempt_timeout=expected_attempt_timeout, + ) as instance: + assert instance._operation_timeout == expected_operation_timeout + assert 
instance._attempt_timeout == expected_attempt_timeout
+                # make simulated gapic call
+                await instance._execute_mutate_rows([self._make_mutation()])
+            assert mutate_rows.call_count == 1
+            kwargs = mutate_rows.call_args[1]
+            assert kwargs["operation_timeout"] == expected_operation_timeout
+            assert kwargs["attempt_timeout"] == expected_attempt_timeout
+
+    @pytest.mark.parametrize(
+        "limit,in_e,start_e,end_e",
+        [
+            (10, 0, (10, 0), (10, 0)),
+            (1, 10, (0, 0), (1, 1)),
+            (10, 1, (0, 0), (1, 0)),
+            (10, 10, (0, 0), (10, 0)),
+            (10, 11, (0, 0), (10, 1)),
+            (3, 20, (0, 0), (3, 3)),
+            (10, 20, (0, 0), (10, 10)),
+            (10, 21, (0, 0), (10, 10)),
+            (2, 1, (2, 0), (2, 1)),
+            (2, 1, (1, 0), (2, 0)),
+            (2, 2, (1, 0), (2, 1)),
+            (3, 1, (3, 1), (3, 2)),
+            (3, 3, (3, 1), (3, 3)),
+            (1000, 5, (999, 0), (1000, 4)),
+            (1000, 5, (0, 0), (5, 0)),
+            (1000, 5, (1000, 0), (1000, 5)),
+        ],
+    )
+    def test__add_exceptions(self, limit, in_e, start_e, end_e):
+        """
+        Test that the _add_exceptions function properly updates the
+        _oldest_exceptions and _newest_exceptions lists
+        Args:
+          - limit: the _exception_list_limit representing the max size of either list
+          - in_e: size of list of exceptions to send to _add_exceptions
+          - start_e: a tuple of ints representing the initial sizes of _oldest_exceptions and _newest_exceptions
+          - end_e: a tuple of ints representing the expected sizes of _oldest_exceptions and _newest_exceptions
+        """
+        from collections import deque
+
+        input_list = [RuntimeError(f"mock {i}") for i in range(in_e)]
+        mock_batcher = mock.Mock()
+        mock_batcher._oldest_exceptions = [
+            RuntimeError(f"starting mock {i}") for i in range(start_e[0])
+        ]
+        mock_batcher._newest_exceptions = deque(
+            [RuntimeError(f"starting mock {i}") for i in range(start_e[1])],
+            maxlen=limit,
+        )
+        mock_batcher._exception_list_limit = limit
+        mock_batcher._exceptions_since_last_raise = 0
+        self._get_target_class()._add_exceptions(mock_batcher, input_list)
+        assert len(mock_batcher._oldest_exceptions) == end_e[0]
+        assert len(mock_batcher._newest_exceptions) == end_e[1]
+        assert mock_batcher._exceptions_since_last_raise == in_e
+        # make sure that the right items ended up in the right spots
+        # should fill the oldest slots first
+        oldest_list_diff = end_e[0] - start_e[0]
+        # new items should be added on top of the starting list
+        newest_list_diff = min(max(in_e - oldest_list_diff, 0), limit)
+        for i in range(oldest_list_diff):
+            assert mock_batcher._oldest_exceptions[i + start_e[0]] == input_list[i]
+        # then, the newest slots should be filled with the last items of the input list
+        for i in range(1, newest_list_diff + 1):
+            assert mock_batcher._newest_exceptions[-i] == input_list[-i]
+
+    @CrossSync.pytest
+    # test different inputs for retryable exceptions
+    @pytest.mark.parametrize(
+        "input_retryables,expected_retryables",
+        [
+            (
+                TABLE_DEFAULT.READ_ROWS,
+                [
+                    core_exceptions.DeadlineExceeded,
+                    core_exceptions.ServiceUnavailable,
+                    core_exceptions.Aborted,
+                    core_exceptions.Cancelled,
+                ],
+            ),
+            (
+                TABLE_DEFAULT.DEFAULT,
+                [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable],
+            ),
+            (
+                TABLE_DEFAULT.MUTATE_ROWS,
+                [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable],
+            ),
+            ([], []),
+            ([4], [core_exceptions.DeadlineExceeded]),
+        ],
+    )
+    @CrossSync.convert
+    async def test_customizable_retryable_errors(
+        self, input_retryables, expected_retryables
+    ):
+        """
+        Test that retryable functions support user-configurable arguments, and that the configured retryables are passed
+        down to the 
gapic layer. + """ + with mock.patch.object( + google.api_core.retry, "if_exception_type" + ) as predicate_builder_mock: + with mock.patch.object(CrossSync, "retry_target") as retry_fn_mock: + table = None + with mock.patch("asyncio.create_task"): + table = CrossSync.Table(mock.Mock(), "instance", "table") + async with self._make_one( + table, batch_retryable_errors=input_retryables + ) as instance: + assert instance._retryable_errors == expected_retryables + expected_predicate = expected_retryables.__contains__ + predicate_builder_mock.return_value = expected_predicate + retry_fn_mock.side_effect = RuntimeError("stop early") + mutation = self._make_mutation(count=1, size=1) + await instance._execute_mutate_rows([mutation]) + # passed in errors should be used to build the predicate + predicate_builder_mock.assert_called_once_with( + *expected_retryables, _MutateRowsIncomplete + ) + retry_call_args = retry_fn_mock.call_args_list[0].args + # output of if_exception_type should be sent in to retry constructor + assert retry_call_args[1] is expected_predicate + + @CrossSync.pytest + async def test_large_batch_write(self): + """ + Test that a large batch of mutations can be written + """ + import math + + num_mutations = 10_000 + flush_limit = 1000 + mutations = [self._make_mutation(count=1, size=1)] * num_mutations + async with self._make_one(flush_limit_mutation_count=flush_limit) as instance: + operation_mock = mock.Mock() + rpc_call_mock = CrossSync.Mock() + operation_mock().start = rpc_call_mock + CrossSync._MutateRowsOperation = operation_mock + for m in mutations: + await instance.append(m) + expected_calls = math.ceil(num_mutations / flush_limit) + assert rpc_call_mock.call_count == expected_calls + assert instance._entries_processed_since_last_raise == num_mutations + assert len(instance._staged_entries) == 0 diff --git a/tests/unit/data/_async/test_read_rows_acceptance.py b/tests/unit/data/_async/test_read_rows_acceptance.py new file mode 100644 index 000000000..ab9502223 --- /dev/null +++ b/tests/unit/data/_async/test_read_rows_acceptance.py @@ -0,0 +1,355 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
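+# For orientation: the acceptance tests below replay ReadRowsResponse.CellChunk
+# sequences through chunk_stream/merge_rows. A minimal well-formed chunk that
+# carries one complete cell looks like this (values are illustrative):
+#
+#     from google.cloud.bigtable_v2 import ReadRowsResponse
+#
+#     chunk = ReadRowsResponse.CellChunk(
+#         row_key=b"row",        # first chunk of a row must carry the key
+#         family_name="f",       # column family of the cell
+#         qualifier=b"q",        # column qualifier
+#         timestamp_micros=1000,
+#         value=b"v",
+#         commit_row=True,       # marks the row complete and safe to yield
+#     )
+#     response = ReadRowsResponse(chunks=[chunk])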
+from __future__ import annotations
+
+import os
+import warnings
+import pytest
+import mock
+
+from itertools import zip_longest
+
+from google.cloud.bigtable_v2 import ReadRowsResponse
+
+from google.cloud.bigtable.data.exceptions import InvalidChunk
+from google.cloud.bigtable.data.row import Row
+
+from ...v2_client.test_row_merger import ReadRowsTest, TestFile
+
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+
+__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_read_rows_acceptance"
+
+
+@CrossSync.convert_class(
+    sync_name="TestReadRowsAcceptance",
+)
+class TestReadRowsAcceptanceAsync:
+    @staticmethod
+    @CrossSync.convert
+    def _get_operation_class():
+        return CrossSync._ReadRowsOperation
+
+    @staticmethod
+    @CrossSync.convert
+    def _get_client_class():
+        return CrossSync.DataClient
+
+    def parse_readrows_acceptance_tests():
+        dirname = os.path.dirname(__file__)
+        filename = os.path.join(dirname, "../read-rows-acceptance-test.json")
+
+        with open(filename) as json_file:
+            test_json = TestFile.from_json(json_file.read())
+            return test_json.read_rows_tests
+
+    @staticmethod
+    def extract_results_from_row(row: Row):
+        results = []
+        for family, col, cells in row.items():
+            for cell in cells:
+                results.append(
+                    ReadRowsTest.Result(
+                        row_key=row.row_key,
+                        family_name=family,
+                        qualifier=col,
+                        timestamp_micros=cell.timestamp_ns // 1000,
+                        value=cell.value,
+                        label=(cell.labels[0] if cell.labels else ""),
+                    )
+                )
+        return results
+
+    @staticmethod
+    @CrossSync.convert
+    async def _coro_wrapper(stream):
+        return stream
+
+    @CrossSync.convert
+    async def _process_chunks(self, *chunks):
+        @CrossSync.convert
+        async def _row_stream():
+            yield ReadRowsResponse(chunks=chunks)
+
+        instance = mock.Mock()
+        instance._remaining_count = None
+        instance._last_yielded_row_key = None
+        chunker = self._get_operation_class().chunk_stream(
+            instance, self._coro_wrapper(_row_stream())
+        )
+        merger = self._get_operation_class().merge_rows(chunker)
+        results = []
+        async for row in merger:
+            results.append(row)
+        return results
+
+    @pytest.mark.parametrize(
+        "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description
+    )
+    @CrossSync.pytest
+    async def test_row_merger_scenario(self, test_case: ReadRowsTest):
+        async def _scenario_stream():
+            for chunk in test_case.chunks:
+                yield ReadRowsResponse(chunks=[chunk])
+
+        try:
+            results = []
+            instance = mock.Mock()
+            instance._last_yielded_row_key = None
+            instance._remaining_count = None
+            chunker = self._get_operation_class().chunk_stream(
+                instance, self._coro_wrapper(_scenario_stream())
+            )
+            merger = self._get_operation_class().merge_rows(chunker)
+            async for row in merger:
+                for cell in row:
+                    cell_result = ReadRowsTest.Result(
+                        row_key=cell.row_key,
+                        family_name=cell.family,
+                        qualifier=cell.qualifier,
+                        timestamp_micros=cell.timestamp_micros,
+                        value=cell.value,
+                        label=cell.labels[0] if cell.labels else "",
+                    )
+                    results.append(cell_result)
+        except InvalidChunk:
+            results.append(ReadRowsTest.Result(error=True))
+        for expected, actual in zip_longest(test_case.results, results):
+            assert actual == expected
+
+    @pytest.mark.parametrize(
+        "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description
+    )
+    @CrossSync.pytest
+    async def test_read_rows_scenario(self, test_case: ReadRowsTest):
+        async def _make_gapic_stream(chunk_list: list[ReadRowsResponse]):
+            from google.cloud.bigtable_v2 import ReadRowsResponse
+
+            class mock_stream:
+                def __init__(self, chunk_list):
+                    self.chunk_list =
chunk_list + self.idx = -1 + + def __aiter__(self): + return self + + def __iter__(self): + return self + + async def __anext__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + chunk = self.chunk_list[self.idx] + return ReadRowsResponse(chunks=[chunk]) + raise CrossSync.StopIteration + + def __next__(self): + return self.__anext__() + + def cancel(self): + pass + + return mock_stream(chunk_list) + + with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + # use emulator mode to avoid auth issues in CI + client = self._get_client_class()() + try: + table = client.get_table("instance", "table") + results = [] + with mock.patch.object( + table.client._gapic_client, "read_rows" + ) as read_rows: + # run once, then return error on retry + read_rows.return_value = _make_gapic_stream(test_case.chunks) + async for row in await table.read_rows_stream(query={}): + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + finally: + await client.close() + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + @CrossSync.pytest + async def test_out_of_order_rows(self): + async def _row_stream(): + yield ReadRowsResponse(last_scanned_row_key=b"a") + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = b"b" + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_row_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + with pytest.raises(InvalidChunk): + async for _ in merger: + pass + + @CrossSync.pytest + async def test_bare_reset(self): + first_chunk = ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk( + row_key=b"a", family_name="f", qualifier=b"q", value=b"v" + ) + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, row_key=b"a") + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, family_name="f") + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, qualifier=b"q") + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, timestamp_micros=1000) + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, labels=["a"]) + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, value=b"v") + ), + ) + + @CrossSync.pytest + async def test_missing_family(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + qualifier=b"q", + timestamp_micros=1000, + value=b"v", + commit_row=True, + ) + ) + + @CrossSync.pytest + async def test_mid_cell_row_key_change(self): + with 
pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(row_key=b"b", value=b"v", commit_row=True), + ) + + @CrossSync.pytest + async def test_mid_cell_family_change(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + family_name="f2", value=b"v", commit_row=True + ), + ) + + @CrossSync.pytest + async def test_mid_cell_qualifier_change(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + qualifier=b"q2", value=b"v", commit_row=True + ), + ) + + @CrossSync.pytest + async def test_mid_cell_timestamp_change(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + timestamp_micros=2000, value=b"v", commit_row=True + ), + ) + + @CrossSync.pytest + async def test_mid_cell_labels_change(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(labels=["b"], value=b"v", commit_row=True), + ) diff --git a/tests/unit/data/_cross_sync/test_cross_sync.py b/tests/unit/data/_cross_sync/test_cross_sync.py new file mode 100644 index 000000000..410f59437 --- /dev/null +++ b/tests/unit/data/_cross_sync/test_cross_sync.py @@ -0,0 +1,579 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
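Before reading the CrossSync unit tests that follow, it helps to picture the shim pattern they verify: one namespace exposes asyncio primitives, and a nested `_Sync_Impl` namespace exposes the threaded equivalents under identical attribute names, so shared code can be written once against either surface. A stripped-down sketch of that idea; `MiniCrossSync` is illustrative only, not the real class.

import asyncio
import queue
import threading
import time

class MiniCrossSync:
    # async flavor of the shared surface
    is_async = True
    sleep = asyncio.sleep
    Queue = asyncio.Queue
    Event = asyncio.Event

    class _Sync_Impl:
        # threaded flavor, same attribute names
        is_async = False
        sleep = time.sleep
        Queue = queue.Queue
        Event = threading.Event

def signal_and_check(cs):
    # identical call sites work against either namespace
    event = cs.Event()
    event.set()
    return event.is_set()

assert signal_and_check(MiniCrossSync._Sync_Impl)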
+import typing +import asyncio +import pytest +import pytest_asyncio +import threading +import concurrent.futures +import time +import queue +import functools +import sys +from google import api_core +from google.cloud.bigtable.data._cross_sync.cross_sync import CrossSync, T + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # type: ignore +except ImportError: # pragma: NO COVER + import mock # type: ignore + from mock import AsyncMock # type: ignore + + +class TestCrossSync: + async def async_iter(self, in_list): + for i in in_list: + yield i + + @pytest.fixture + def cs_sync(self): + return CrossSync._Sync_Impl + + @pytest_asyncio.fixture + def cs_async(self): + return CrossSync + + @pytest.mark.parametrize( + "attr, async_version, sync_version", + [ + ("is_async", True, False), + ("sleep", asyncio.sleep, time.sleep), + ( + "retry_target", + api_core.retry.retry_target_async, + api_core.retry.retry_target, + ), + ( + "retry_target_stream", + api_core.retry.retry_target_stream_async, + api_core.retry.retry_target_stream, + ), + ("Retry", api_core.retry.AsyncRetry, api_core.retry.Retry), + ("Queue", asyncio.Queue, queue.Queue), + ("Condition", asyncio.Condition, threading.Condition), + ("Future", asyncio.Future, concurrent.futures.Future), + ("Task", asyncio.Task, concurrent.futures.Future), + ("Event", asyncio.Event, threading.Event), + ("Semaphore", asyncio.Semaphore, threading.Semaphore), + ("StopIteration", StopAsyncIteration, StopIteration), + # types + ("Awaitable", typing.Awaitable, typing.Union[T]), + ("Iterable", typing.AsyncIterable, typing.Iterable), + ("Iterator", typing.AsyncIterator, typing.Iterator), + ("Generator", typing.AsyncGenerator, typing.Generator), + ], + ) + def test_alias_attributes( + self, attr, async_version, sync_version, cs_sync, cs_async + ): + """ + Test basic alias attributes, to ensure they point to the right place + in both sync and async versions. 
+ """ + assert ( + getattr(cs_async, attr) == async_version + ), f"Failed async version for {attr}" + assert getattr(cs_sync, attr) == sync_version, f"Failed sync version for {attr}" + + @pytest.mark.asyncio + async def test_Mock(self, cs_sync, cs_async): + """ + Test Mock class in both sync and async versions + """ + import unittest.mock + + assert isinstance(cs_async.Mock(), AsyncMock) + assert isinstance(cs_sync.Mock(), unittest.mock.Mock) + # test with return value + assert await cs_async.Mock(return_value=1)() == 1 + assert cs_sync.Mock(return_value=1)() == 1 + + def test_next(self, cs_sync): + """ + Test sync version of CrossSync.next() + """ + it = iter([1, 2, 3]) + assert cs_sync.next(it) == 1 + assert cs_sync.next(it) == 2 + assert cs_sync.next(it) == 3 + with pytest.raises(StopIteration): + cs_sync.next(it) + with pytest.raises(cs_sync.StopIteration): + cs_sync.next(it) + + @pytest.mark.asyncio + async def test_next_async(self, cs_async): + """ + test async version of CrossSync.next() + """ + async_it = self.async_iter([1, 2, 3]) + assert await cs_async.next(async_it) == 1 + assert await cs_async.next(async_it) == 2 + assert await cs_async.next(async_it) == 3 + with pytest.raises(StopAsyncIteration): + await cs_async.next(async_it) + with pytest.raises(cs_async.StopIteration): + await cs_async.next(async_it) + + def test_gather_partials(self, cs_sync): + """ + Test sync version of CrossSync.gather_partials() + """ + with concurrent.futures.ThreadPoolExecutor() as e: + partials = [lambda i=i: i + 1 for i in range(5)] + results = cs_sync.gather_partials(partials, sync_executor=e) + assert results == [1, 2, 3, 4, 5] + + def test_gather_partials_with_excepptions(self, cs_sync): + """ + Test sync version of CrossSync.gather_partials() with exceptions + """ + with concurrent.futures.ThreadPoolExecutor() as e: + partials = [lambda i=i: i + 1 if i != 3 else 1 / 0 for i in range(5)] + with pytest.raises(ZeroDivisionError): + cs_sync.gather_partials(partials, sync_executor=e) + + def test_gather_partials_return_exceptions(self, cs_sync): + """ + Test sync version of CrossSync.gather_partials() with return_exceptions=True + """ + with concurrent.futures.ThreadPoolExecutor() as e: + partials = [lambda i=i: i + 1 if i != 3 else 1 / 0 for i in range(5)] + results = cs_sync.gather_partials( + partials, return_exceptions=True, sync_executor=e + ) + assert len(results) == 5 + assert results[0] == 1 + assert results[1] == 2 + assert results[2] == 3 + assert isinstance(results[3], ZeroDivisionError) + assert results[4] == 5 + + def test_gather_partials_no_executor(self, cs_sync): + """ + Test sync version of CrossSync.gather_partials() without an executor + """ + partials = [lambda i=i: i + 1 for i in range(5)] + with pytest.raises(ValueError) as e: + cs_sync.gather_partials(partials) + assert "sync_executor is required" in str(e.value) + + @pytest.mark.asyncio + async def test_gather_partials_async(self, cs_async): + """ + Test async version of CrossSync.gather_partials() + """ + + async def coro(i): + return i + 1 + + partials = [functools.partial(coro, i) for i in range(5)] + results = await cs_async.gather_partials(partials) + assert results == [1, 2, 3, 4, 5] + + @pytest.mark.asyncio + async def test_gather_partials_async_with_exceptions(self, cs_async): + """ + Test async version of CrossSync.gather_partials() with exceptions + """ + + async def coro(i): + return i + 1 if i != 3 else 1 / 0 + + partials = [functools.partial(coro, i) for i in range(5)] + with pytest.raises(ZeroDivisionError): + 
await cs_async.gather_partials(partials) + + @pytest.mark.asyncio + async def test_gather_partials_async_return_exceptions(self, cs_async): + """ + Test async version of CrossSync.gather_partials() with return_exceptions=True + """ + + async def coro(i): + return i + 1 if i != 3 else 1 / 0 + + partials = [functools.partial(coro, i) for i in range(5)] + results = await cs_async.gather_partials(partials, return_exceptions=True) + assert len(results) == 5 + assert results[0] == 1 + assert results[1] == 2 + assert results[2] == 3 + assert isinstance(results[3], ZeroDivisionError) + assert results[4] == 5 + + @pytest.mark.asyncio + async def test_gather_partials_async_uses_asyncio_gather(self, cs_async): + """ + CrossSync.gather_partials() should use asyncio.gather() internally + """ + + async def coro(i): + return i + 1 + + return_exceptions = object() + partials = [functools.partial(coro, i) for i in range(5)] + with mock.patch.object(asyncio, "gather", AsyncMock()) as gather: + await cs_async.gather_partials( + partials, return_exceptions=return_exceptions + ) + gather.assert_called_once() + found_args, found_kwargs = gather.call_args + assert found_kwargs["return_exceptions"] == return_exceptions + for coro in found_args: + await coro + + def test_wait(self, cs_sync): + """ + Test sync version of CrossSync.wait() + + If future is complete, it should be in the first (complete) set + """ + future = concurrent.futures.Future() + future.set_result(1) + s1, s2 = cs_sync.wait([future]) + assert s1 == {future} + assert s2 == set() + + def test_wait_timeout(self, cs_sync): + """ + If timeout occurs, future should be in the second (incomplete) set + """ + future = concurrent.futures.Future() + timeout = 0.1 + start_time = time.monotonic() + s1, s2 = cs_sync.wait([future], timeout) + end_time = time.monotonic() + assert abs((end_time - start_time) - timeout) < 0.01 + assert s1 == set() + assert s2 == {future} + + def test_wait_passthrough(self, cs_sync): + """ + sync version of CrossSync.wait() should pass through to concurrent.futures.wait() + """ + future = object() + timeout = object() + with mock.patch.object(concurrent.futures, "wait", mock.Mock()) as wait: + result = cs_sync.wait([future], timeout) + assert wait.call_count == 1 + assert wait.call_args == (([future],), {"timeout": timeout}) + assert result == wait.return_value + + def test_wait_empty_input(self, cs_sync): + """ + If no futures are provided, return empty sets + """ + s1, s2 = cs_sync.wait([]) + assert s1 == set() + assert s2 == set() + + @pytest.mark.asyncio + async def test_wait_async(self, cs_async): + """ + Test async version of CrossSync.wait() + """ + future = asyncio.Future() + future.set_result(1) + s1, s2 = await cs_async.wait([future]) + assert s1 == {future} + assert s2 == set() + + @pytest.mark.asyncio + async def test_wait_async_timeout(self, cs_async): + """ + If timeout occurs, future should be in the second (incomplete) set + """ + future = asyncio.Future() + timeout = 0.1 + start_time = time.monotonic() + s1, s2 = await cs_async.wait([future], timeout) + end_time = time.monotonic() + assert abs((end_time - start_time) - timeout) < 0.01 + assert s1 == set() + assert s2 == {future} + + @pytest.mark.asyncio + async def test_wait_async_passthrough(self, cs_async): + """ + async version of CrossSync.wait() should pass through to asyncio.wait() + """ + future = object() + timeout = object() + with mock.patch.object(asyncio, "wait", AsyncMock()) as wait: + result = await cs_async.wait([future], timeout) + assert 
wait.call_count == 1
+            assert wait.call_args == (([future],), {"timeout": timeout})
+            assert result == wait.return_value
+
+    @pytest.mark.asyncio
+    async def test_wait_async_empty_input(self, cs_async):
+        """
+        If no futures are provided, return empty sets
+        """
+        s1, s2 = await cs_async.wait([])
+        assert s1 == set()
+        assert s2 == set()
+
+    def test_event_wait_passthrough(self, cs_sync):
+        """
+        Test sync version of CrossSync.event_wait()
+        should pass through timeout directly to the event.wait() call
+        """
+        event = mock.Mock()
+        timeout = object()
+        cs_sync.event_wait(event, timeout)
+        event.wait.assert_called_once_with(timeout=timeout)
+
+    @pytest.mark.parametrize("timeout", [0, 0.01, 0.05])
+    def test_event_wait_timeout_exceeded(self, cs_sync, timeout):
+        """
+        Test sync version of CrossSync.event_wait()
+        """
+        event = threading.Event()
+        start_time = time.monotonic()
+        cs_sync.event_wait(event, timeout=timeout)
+        end_time = time.monotonic()
+        assert abs((end_time - start_time) - timeout) < 0.01
+
+    def test_event_wait_already_set(self, cs_sync):
+        """
+        if event is already set, do not block
+        """
+        event = threading.Event()
+        event.set()
+        start_time = time.monotonic()
+        cs_sync.event_wait(event, timeout=10)
+        end_time = time.monotonic()
+        assert end_time - start_time < 0.01
+
+    @pytest.mark.parametrize("break_early", [True, False])
+    @pytest.mark.asyncio
+    async def test_event_wait_async(self, cs_async, break_early):
+        """
+        With no timeout, call event.wait() with no arguments
+        """
+        event = AsyncMock()
+        await cs_async.event_wait(event, async_break_early=break_early)
+        event.wait.assert_called_once_with()
+
+    @pytest.mark.asyncio
+    async def test_event_wait_async_with_timeout(self, cs_async):
+        """
+        With a timeout set, should call event.wait(), wrapped in wait_for()
+        for the timeout
+        """
+        event = mock.Mock()
+        event.wait.return_value = object()
+        timeout = object()
+        with mock.patch.object(asyncio, "wait_for", AsyncMock()) as wait_for:
+            await cs_async.event_wait(event, timeout=timeout)
+            assert wait_for.await_count == 1
+            assert wait_for.call_count == 1
+            wait_for.assert_called_once_with(event.wait(), timeout=timeout)
+
+    @pytest.mark.asyncio
+    async def test_event_wait_async_timeout_exceeded(self, cs_async):
+        """
+        If timeout is exceeded, break without throwing an exception
+        """
+        event = asyncio.Event()
+        timeout = 0.5
+        start_time = time.monotonic()
+        await cs_async.event_wait(event, timeout=timeout)
+        end_time = time.monotonic()
+        assert abs((end_time - start_time) - timeout) < 0.01
+
+    @pytest.mark.parametrize("break_early", [True, False])
+    @pytest.mark.asyncio
+    async def test_event_wait_async_already_set(self, cs_async, break_early):
+        """
+        if event is already set, return immediately
+        """
+        event = AsyncMock()
+        event.is_set = lambda: True
+        start_time = time.monotonic()
+        await cs_async.event_wait(event, async_break_early=break_early)
+        end_time = time.monotonic()
+        assert abs(end_time - start_time) < 0.01
+
+    @pytest.mark.asyncio
+    async def test_event_wait_no_break_early(self, cs_async):
+        """
+        if async_break_early is False, and the event is not set,
+        simply sleep for the timeout
+        """
+        event = mock.Mock()
+        event.is_set.return_value = False
+        timeout = object()
+        with mock.patch.object(asyncio, "sleep", AsyncMock()) as sleep:
+            await cs_async.event_wait(event, timeout=timeout, async_break_early=False)
+        sleep.assert_called_once_with(timeout)
+
+    def test_create_task(self, cs_sync):
+        """
+        Test creating Future using create_task()
+        """
+        executor =
concurrent.futures.ThreadPoolExecutor()
+        fn = lambda x, y: x + y  # noqa: E731
+        result = cs_sync.create_task(fn, 1, y=4, sync_executor=executor)
+        assert isinstance(result, cs_sync.Task)
+        assert result.result() == 5
+
+    def test_create_task_passthrough(self, cs_sync):
+        """
+        sync version passed through to executor.submit()
+        """
+        fn = object()
+        executor = mock.Mock()
+        executor.submit.return_value = object()
+        args = [1, 2, 3]
+        kwargs = {"a": 1, "b": 2}
+        result = cs_sync.create_task(fn, *args, **kwargs, sync_executor=executor)
+        assert result == executor.submit.return_value
+        assert executor.submit.call_count == 1
+        assert executor.submit.call_args == ((fn, *args), kwargs)
+
+    def test_create_task_no_executor(self, cs_sync):
+        """
+        if no executor is provided, raise an exception
+        """
+        with pytest.raises(ValueError) as e:
+            cs_sync.create_task(lambda: None)
+        assert "sync_executor is required" in str(e.value)
+
+    @pytest.mark.asyncio
+    async def test_create_task_async(self, cs_async):
+        """
+        Test creating Future using create_task()
+        """
+
+        async def coro_fn(x, y):
+            return x + y
+
+        result = cs_async.create_task(coro_fn, 1, y=4)
+        assert isinstance(result, asyncio.Task)
+        assert await result == 5
+
+    @pytest.mark.asyncio
+    async def test_create_task_async_passthrough(self, cs_async):
+        """
+        async version passed through to asyncio.create_task()
+        """
+        coro_fn = mock.Mock()
+        coro_fn.return_value = object()
+        args = [1, 2, 3]
+        kwargs = {"a": 1, "b": 2}
+        with mock.patch.object(asyncio, "create_task", mock.Mock()) as create_task:
+            cs_async.create_task(coro_fn, *args, **kwargs)
+        create_task.assert_called_once()
+        create_task.assert_called_once_with(coro_fn.return_value)
+        coro_fn.assert_called_once_with(*args, **kwargs)
+
+    @pytest.mark.skipif(
+        sys.version_info < (3, 8), reason="Task names require python 3.8"
+    )
+    @pytest.mark.asyncio
+    async def test_create_task_async_with_name(self, cs_async):
+        """
+        Test creating a task with a name
+        """
+
+        async def coro_fn():
+            return None
+
+        name = "test-name-456"
+        result = cs_async.create_task(coro_fn, task_name=name)
+        assert isinstance(result, asyncio.Task)
+        assert result.get_name() == name
+
+    def test_yield_to_event_loop(self, cs_sync):
+        """
+        no-op in sync version
+        """
+        assert cs_sync.yield_to_event_loop() is None
+
+    @pytest.mark.asyncio
+    async def test_yield_to_event_loop_async(self, cs_async):
+        """
+        should call await asyncio.sleep(0)
+        """
+        with mock.patch.object(asyncio, "sleep", AsyncMock()) as sleep:
+            await cs_async.yield_to_event_loop()
+        sleep.assert_called_once_with(0)
+
+    def test_verify_async_event_loop(self, cs_sync):
+        """
+        no-op in sync version
+        """
+        assert cs_sync.verify_async_event_loop() is None
+
+    @pytest.mark.asyncio
+    async def test_verify_async_event_loop_async(self, cs_async):
+        """
+        should call asyncio.get_running_loop()
+        """
+        with mock.patch.object(asyncio, "get_running_loop") as get_running_loop:
+            cs_async.verify_async_event_loop()
+        get_running_loop.assert_called_once()
+
+    def test_verify_async_event_loop_no_event_loop(self, cs_async):
+        """
+        Should raise an exception if no event loop is running
+        """
+        with pytest.raises(RuntimeError) as e:
+            cs_async.verify_async_event_loop()
+        assert "no running event loop" in str(e.value)
+
+    def test_rmaio(self, cs_async):
+        """
+        rm_aio should return whatever is passed to it
+        """
+        assert cs_async.rm_aio(1) == 1
+        assert cs_async.rm_aio("test") == "test"
+        obj = object()
+        assert cs_async.rm_aio(obj) == obj
+
+    def test_add_mapping(self,
cs_sync, cs_async):
+        """
+        Add dynamic attributes to each class using add_mapping()
+        """
+        for cls in [cs_sync, cs_async]:
+            cls.add_mapping("test", 1)
+            assert cls.test == 1
+            assert cls._runtime_replacements[(cls, "test")] == 1
+
+    def test_add_duplicate_mapping(self, cs_sync, cs_async):
+        """
+        Adding the same attribute twice should raise an exception
+        """
+        for cls in [cs_sync, cs_async]:
+            cls.add_mapping("duplicate", 1)
+            with pytest.raises(AttributeError) as e:
+                cls.add_mapping("duplicate", 2)
+            assert "Conflicting assignments" in str(e.value)
+
+    def test_add_mapping_decorator(self, cs_sync, cs_async):
+        """
+        add_mapping_decorator should allow wrapping classes with add_mapping()
+        """
+        for cls in [cs_sync, cs_async]:
+
+            @cls.add_mapping_decorator("decorated")
+            class Decorated:
+                pass
+
+            assert cls.decorated == Decorated
diff --git a/tests/unit/data/_cross_sync/test_cross_sync_decorators.py b/tests/unit/data/_cross_sync/test_cross_sync_decorators.py
new file mode 100644
index 000000000..3be579379
--- /dev/null
+++ b/tests/unit/data/_cross_sync/test_cross_sync_decorators.py
@@ -0,0 +1,542 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+import pytest_asyncio
+import ast
+from unittest import mock
+from google.cloud.bigtable.data._cross_sync.cross_sync import CrossSync
+from google.cloud.bigtable.data._cross_sync._decorators import (
+    ConvertClass,
+    Convert,
+    Drop,
+    Pytest,
+    PytestFixture,
+)
+
+
+@pytest.fixture
+def globals_mock():
+    mock_transform = mock.Mock()
+    mock_transform().visit = lambda x: x
+    global_dict = {
+        k: mock_transform
+        for k in ["RmAioFunctions", "SymbolReplacer", "CrossSyncMethodDecoratorHandler"]
+    }
+    return global_dict
+
+
+class TestConvertClassDecorator:
+    def _get_class(self):
+        return ConvertClass
+
+    def test_ctor_defaults(self):
+        """
+        Should set default values for sync_name, replace_symbols, add_mapping_for_name, and docstring_format_vars
+        """
+        instance = self._get_class()()
+        assert instance.sync_name is None
+        assert instance.replace_symbols is None
+        assert instance.add_mapping_for_name is None
+        assert instance.async_docstring_format_vars == {}
+        assert instance.sync_docstring_format_vars == {}
+        assert instance.rm_aio is False
+
+    def test_ctor(self):
+        sync_name = "sync_name"
+        replace_symbols = {"a": "b"}
+        docstring_format_vars = {"A": (1, 2)}
+        add_mapping_for_name = "test_name"
+        rm_aio = True
+
+        instance = self._get_class()(
+            sync_name,
+            replace_symbols=replace_symbols,
+            docstring_format_vars=docstring_format_vars,
+            add_mapping_for_name=add_mapping_for_name,
+            rm_aio=rm_aio,
+        )
+        assert instance.sync_name is sync_name
+        assert instance.replace_symbols is replace_symbols
+        assert instance.add_mapping_for_name is add_mapping_for_name
+        assert instance.async_docstring_format_vars == {"A": 1}
+        assert instance.sync_docstring_format_vars == {"A": 2}
+        assert instance.rm_aio is rm_aio
+
+    def test_class_decorator(self):
+        """
+        Should return class being decorated
+        """
+        unwrapped_class = mock.Mock
wrapped_class = self._get_class().decorator(unwrapped_class, sync_name="s") + assert unwrapped_class == wrapped_class + + def test_class_decorator_adds_mapping(self): + """ + If add_mapping_for_name is set, should call CrossSync.add_mapping with the class being decorated + """ + with mock.patch.object(CrossSync, "add_mapping") as add_mapping: + mock_cls = mock.Mock + # check decoration with no add_mapping + self._get_class().decorator(sync_name="s")(mock_cls) + assert add_mapping.call_count == 0 + # check decoration with add_mapping + name = "test_name" + self._get_class().decorator(sync_name="s", add_mapping_for_name=name)( + mock_cls + ) + assert add_mapping.call_count == 1 + add_mapping.assert_called_once_with(name, mock_cls) + + @pytest.mark.parametrize( + "docstring,format_vars,expected", + [ + ["test docstring", {}, "test docstring"], + ["{}", {}, "{}"], + ["test_docstring", {"A": (1, 2)}, "test_docstring"], + ["{A}", {"A": (1, 2)}, "1"], + ["{A} {B}", {"A": (1, 2), "B": (3, 4)}, "1 3"], + ["hello {world_var}", {"world_var": ("world", "moon")}, "hello world"], + ["{empty}", {"empty": ("", "")}, ""], + ["{empty}", {"empty": (None, None)}, ""], + ["maybe{empty}", {"empty": (None, "yes")}, "maybe"], + ["maybe{empty}", {"empty": (" no", None)}, "maybe no"], + ], + ) + def test_class_decorator_docstring_update(self, docstring, format_vars, expected): + """ + If docstring_format_vars is set, should update the docstring + of the class being decorated + """ + + @ConvertClass.decorator(sync_name="s", docstring_format_vars=format_vars) + class Class: + __doc__ = docstring + + assert Class.__doc__ == expected + # check internal state + instance = self._get_class()(sync_name="s", docstring_format_vars=format_vars) + async_replacements = {k: v[0] or "" for k, v in format_vars.items()} + sync_replacements = {k: v[1] or "" for k, v in format_vars.items()} + assert instance.async_docstring_format_vars == async_replacements + assert instance.sync_docstring_format_vars == sync_replacements + + def test_sync_ast_transform_replaces_name(self, globals_mock): + """ + Should update the name of the new class + """ + decorator = self._get_class()("SyncClass") + mock_node = ast.ClassDef(name="AsyncClass", bases=[], keywords=[], body=[]) + + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.ClassDef) + assert result.name == "SyncClass" + + def test_sync_ast_transform_strips_cross_sync_decorators(self, globals_mock): + """ + should remove all CrossSync decorators from the class + """ + decorator = self._get_class()("path") + cross_sync_decorator = ast.Call( + func=ast.Attribute( + value=ast.Name(id="CrossSync", ctx=ast.Load()), + attr="some_decorator", + ctx=ast.Load(), + ), + args=[], + keywords=[], + ) + other_decorator = ast.Name(id="other_decorator", ctx=ast.Load()) + mock_node = ast.ClassDef( + name="AsyncClass", + bases=[], + keywords=[], + body=[], + decorator_list=[cross_sync_decorator, other_decorator], + ) + + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.ClassDef) + assert len(result.decorator_list) == 1 + assert isinstance(result.decorator_list[0], ast.Name) + assert result.decorator_list[0].id == "other_decorator" + + def test_sync_ast_transform_add_mapping(self, globals_mock): + """ + If add_mapping_for_name is set, should add CrossSync.add_mapping_decorator to new class + """ + decorator = self._get_class()("path", add_mapping_for_name="sync_class") + mock_node = ast.ClassDef(name="AsyncClass", 
bases=[], keywords=[], body=[]) + + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.ClassDef) + assert len(result.decorator_list) == 1 + assert isinstance(result.decorator_list[0], ast.Call) + assert isinstance(result.decorator_list[0].func, ast.Attribute) + assert result.decorator_list[0].func.attr == "add_mapping_decorator" + assert result.decorator_list[0].args[0].value == "sync_class" + + @pytest.mark.parametrize( + "docstring,format_vars,expected", + [ + ["test docstring", {}, "test docstring"], + ["{}", {}, "{}"], + ["test_docstring", {"A": (1, 2)}, "test_docstring"], + ["{A}", {"A": (1, 2)}, "2"], + ["{A} {B}", {"A": (1, 2), "B": (3, 4)}, "2 4"], + ["hello {world_var}", {"world_var": ("world", "moon")}, "hello moon"], + ], + ) + def test_sync_ast_transform_add_docstring_format( + self, docstring, format_vars, expected, globals_mock + ): + """ + If docstring_format_vars is set, should format the docstring of the new class + """ + decorator = self._get_class()( + "path.to.SyncClass", docstring_format_vars=format_vars + ) + mock_node = ast.ClassDef( + name="AsyncClass", + bases=[], + keywords=[], + body=[ast.Expr(value=ast.Constant(value=docstring))], + ) + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.ClassDef) + assert isinstance(result.body[0], ast.Expr) + assert isinstance(result.body[0].value, ast.Constant) + assert result.body[0].value.value == expected + + def test_sync_ast_transform_replace_symbols(self, globals_mock): + """ + SymbolReplacer should be called with replace_symbols + """ + replace_symbols = {"a": "b", "c": "d"} + decorator = self._get_class()( + "path.to.SyncClass", replace_symbols=replace_symbols + ) + mock_node = ast.ClassDef(name="AsyncClass", bases=[], keywords=[], body=[]) + symbol_transform_mock = mock.Mock() + globals_mock = {**globals_mock, "SymbolReplacer": symbol_transform_mock} + decorator.sync_ast_transform(mock_node, globals_mock) + # make sure SymbolReplacer was called with replace_symbols + assert symbol_transform_mock.call_count == 1 + found_dict = symbol_transform_mock.call_args[0][0] + assert "a" in found_dict + for k, v in replace_symbols.items(): + assert found_dict[k] == v + + def test_sync_ast_transform_rmaio_calls_async_to_sync(self): + """ + Should call AsyncToSync if rm_aio is set + """ + decorator = self._get_class()(rm_aio=True) + mock_node = ast.ClassDef(name="AsyncClass", bases=[], keywords=[], body=[]) + async_to_sync_mock = mock.Mock() + async_to_sync_mock.visit.side_effect = lambda x: x + globals_mock = {"AsyncToSync": lambda: async_to_sync_mock} + + decorator.sync_ast_transform(mock_node, globals_mock) + assert async_to_sync_mock.visit.call_count == 1 + + +class TestConvertDecorator: + def _get_class(self): + return Convert + + def test_ctor_defaults(self): + instance = self._get_class()() + assert instance.sync_name is None + assert instance.replace_symbols is None + assert instance.async_docstring_format_vars == {} + assert instance.sync_docstring_format_vars == {} + assert instance.rm_aio is True + + def test_ctor(self): + sync_name = "sync_name" + replace_symbols = {"a": "b"} + docstring_format_vars = {"A": (1, 2)} + rm_aio = False + + instance = self._get_class()( + sync_name=sync_name, + replace_symbols=replace_symbols, + docstring_format_vars=docstring_format_vars, + rm_aio=rm_aio, + ) + assert instance.sync_name is sync_name + assert instance.replace_symbols is replace_symbols + assert instance.async_docstring_format_vars == 
{"A": 1} + assert instance.sync_docstring_format_vars == {"A": 2} + assert instance.rm_aio is rm_aio + + def test_async_decorator_no_docstring(self): + """ + If no docstring_format_vars is set, should be a no-op + """ + unwrapped_class = mock.Mock + wrapped_class = self._get_class().decorator(unwrapped_class) + assert unwrapped_class == wrapped_class + + @pytest.mark.parametrize( + "docstring,format_vars,expected", + [ + ["test docstring", {}, "test docstring"], + ["{}", {}, "{}"], + ["test_docstring", {"A": (1, 2)}, "test_docstring"], + ["{A}", {"A": (1, 2)}, "1"], + ["{A} {B}", {"A": (1, 2), "B": (3, 4)}, "1 3"], + ["hello {world_var}", {"world_var": ("world", "moon")}, "hello world"], + ["{empty}", {"empty": ("", "")}, ""], + ["{empty}", {"empty": (None, None)}, ""], + ["maybe{empty}", {"empty": (None, "yes")}, "maybe"], + ["maybe{empty}", {"empty": (" no", None)}, "maybe no"], + ], + ) + def test_async_decorator_docstring_update(self, docstring, format_vars, expected): + """ + If docstring_format_vars is set, should update the docstring + of the class being decorated + """ + + @Convert.decorator(docstring_format_vars=format_vars) + class Class: + __doc__ = docstring + + assert Class.__doc__ == expected + # check internal state + instance = self._get_class()(docstring_format_vars=format_vars) + async_replacements = {k: v[0] or "" for k, v in format_vars.items()} + sync_replacements = {k: v[1] or "" for k, v in format_vars.items()} + assert instance.async_docstring_format_vars == async_replacements + assert instance.sync_docstring_format_vars == sync_replacements + + def test_sync_ast_transform_remove_adef(self): + """ + Should convert `async def` methods to `def` methods + """ + decorator = self._get_class()(rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + + result = decorator.sync_ast_transform(mock_node, {}) + + assert isinstance(result, ast.FunctionDef) + assert result.name == "test_method" + + def test_sync_ast_transform_replaces_name(self, globals_mock): + """ + Should update the name of the method if sync_name is set + """ + decorator = self._get_class()(sync_name="new_method_name", rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="old_method_name", args=ast.arguments(), body=[] + ) + + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.FunctionDef) + assert result.name == "new_method_name" + + def test_sync_ast_transform_rmaio_calls_async_to_sync(self): + """ + Should call AsyncToSync if rm_aio is set + """ + decorator = self._get_class()(rm_aio=True) + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + async_to_sync_mock = mock.Mock() + async_to_sync_mock.visit.return_value = mock_node + globals_mock = {"AsyncToSync": lambda: async_to_sync_mock} + + decorator.sync_ast_transform(mock_node, globals_mock) + assert async_to_sync_mock.visit.call_count == 1 + + def test_sync_ast_transform_replace_symbols(self): + """ + Should call SymbolReplacer with replace_symbols if replace_symbols is set + """ + replace_symbols = {"old_symbol": "new_symbol"} + decorator = self._get_class()(replace_symbols=replace_symbols, rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + symbol_replacer_mock = mock.Mock() + globals_mock = {"SymbolReplacer": symbol_replacer_mock} + + decorator.sync_ast_transform(mock_node, globals_mock) + + assert symbol_replacer_mock.call_count == 1 + assert 
symbol_replacer_mock.call_args[0][0] == replace_symbols + assert symbol_replacer_mock(replace_symbols).visit.call_count == 1 + + @pytest.mark.parametrize( + "docstring,format_vars,expected", + [ + ["test docstring", {}, "test docstring"], + ["{}", {}, "{}"], + ["test_docstring", {"A": (1, 2)}, "test_docstring"], + ["{A}", {"A": (1, 2)}, "2"], + ["{A} {B}", {"A": (1, 2), "B": (3, 4)}, "2 4"], + ["hello {world_var}", {"world_var": ("world", "moon")}, "hello moon"], + ], + ) + def test_sync_ast_transform_add_docstring_format( + self, docstring, format_vars, expected + ): + """ + If docstring_format_vars is set, should format the docstring of the new method + """ + decorator = self._get_class()(docstring_format_vars=format_vars, rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="test_method", + args=ast.arguments(), + body=[ast.Expr(value=ast.Constant(value=docstring))], + ) + + result = decorator.sync_ast_transform(mock_node, {}) + + assert isinstance(result, ast.FunctionDef) + assert isinstance(result.body[0], ast.Expr) + assert isinstance(result.body[0].value, ast.Constant) + assert result.body[0].value.value == expected + + +class TestDropDecorator: + def _get_class(self): + return Drop + + def test_decorator_functionality(self): + """ + applying the decorator should be a no-op + """ + unwrapped = lambda x: x # noqa: E731 + wrapped = self._get_class().decorator(unwrapped) + assert unwrapped == wrapped + assert unwrapped(1) == wrapped(1) + assert wrapped(1) == 1 + + def test_sync_ast_transform(self): + """ + Should return None for any input method + """ + decorator = self._get_class()() + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + + result = decorator.sync_ast_transform(mock_node, {}) + + assert result is None + + +class TestPytestDecorator: + def _get_class(self): + return Pytest + + def test_ctor(self): + instance = self._get_class()() + assert instance.rm_aio is True + instance = self._get_class()(rm_aio=False) + assert instance.rm_aio is False + + def test_decorator_functionality(self): + """ + Should wrap the class with pytest.mark.asyncio + """ + unwrapped_fn = mock.Mock + wrapped_class = self._get_class().decorator(unwrapped_fn) + assert wrapped_class == pytest.mark.asyncio(unwrapped_fn) + + def test_sync_ast_transform(self): + """ + If rm_aio is True (default), should call AsyncToSync on the class + """ + decorator = self._get_class()() + mock_node = ast.AsyncFunctionDef( + name="AsyncMethod", args=ast.arguments(), body=[] + ) + + async_to_sync_mock = mock.Mock() + async_to_sync_mock.visit.side_effect = lambda x: x + globals_mock = {"AsyncToSync": lambda: async_to_sync_mock} + + transformed = decorator.sync_ast_transform(mock_node, globals_mock) + assert async_to_sync_mock.visit.call_count == 1 + assert isinstance(transformed, ast.FunctionDef) + + def test_sync_ast_transform_no_rm_aio(self): + """ + if rm_aio is False, should remove the async keyword from the method + """ + decorator = self._get_class()(rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="AsyncMethod", args=ast.arguments(), body=[] + ) + + async_to_sync_mock = mock.Mock() + async_to_sync_mock.visit.return_value = mock_node + globals_mock = {"AsyncToSync": lambda: async_to_sync_mock} + + transformed = decorator.sync_ast_transform(mock_node, globals_mock) + assert async_to_sync_mock.visit.call_count == 0 + assert isinstance(transformed, ast.FunctionDef) + + +class TestPytestFixtureDecorator: + def _get_class(self): + return PytestFixture + + def 
test_decorator_functionality(self): + """ + Should wrap the class with pytest_asyncio.fixture + """ + with mock.patch.object(pytest_asyncio, "fixture") as fixture: + + @PytestFixture.decorator(1, 2, scope="function", params=[3, 4]) + def fn(): + pass + + assert fixture.call_count == 1 + assert fixture.call_args[0] == (1, 2) + assert fixture.call_args[1] == {"scope": "function", "params": [3, 4]} + + def test_sync_ast_transform(self): + """ + Should attach pytest.fixture to generated method + """ + decorator = self._get_class()(1, 2, scope="function") + + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + + result = decorator.sync_ast_transform(mock_node, {}) + + assert isinstance(result, ast.AsyncFunctionDef) + assert len(result.decorator_list) == 1 + assert isinstance(result.decorator_list[0], ast.Call) + assert result.decorator_list[0].func.value.id == "pytest" + assert result.decorator_list[0].func.attr == "fixture" + assert result.decorator_list[0].args[0].value == 1 + assert result.decorator_list[0].args[1].value == 2 + assert result.decorator_list[0].keywords[0].arg == "scope" + assert result.decorator_list[0].keywords[0].value.value == "function" diff --git a/tests/unit/data/_metrics/__init__.py b/tests/unit/data/_metrics/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/data/_metrics/test_data_model.py b/tests/unit/data/_metrics/test_data_model.py new file mode 100644 index 000000000..93e73c9d8 --- /dev/null +++ b/tests/unit/data/_metrics/test_data_model.py @@ -0,0 +1,730 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
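The decorator tests above all funnel into one mechanism: an `ast.NodeTransformer` that rewrites async constructs into their sync forms. For intuition, here is a toy transformer in the same spirit; it is illustrative only, not the repo's actual `AsyncToSync` (which lives under `.cross_sync` and handles many more cases).

import ast

class MiniAsyncToSync(ast.NodeTransformer):
    """Rewrite `async def` into `def` and unwrap `await` expressions."""

    def visit_AsyncFunctionDef(self, node):
        self.generic_visit(node)  # rewrite awaits inside the body first
        # AsyncFunctionDef and FunctionDef share the same fields, so the
        # node can simply be re-typed in place
        node.__class__ = ast.FunctionDef
        return node

    def visit_Await(self, node):
        self.generic_visit(node)
        return node.value  # `await x` -> `x`

tree = MiniAsyncToSync().visit(ast.parse("async def fetch():\n    return await get()"))
print(ast.unparse(tree))  # prints: def fetch():\n    return get()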
+ +import pytest +import mock + +from google.cloud.bigtable.data._metrics.data_model import OperationState as State +from google.cloud.bigtable_v2.types import ResponseParams + + +class TestActiveOperationMetric: + def _make_one(self, *args, **kwargs): + from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric + + return ActiveOperationMetric(*args, **kwargs) + + @mock.patch("time.monotonic_ns") + def test_ctor_defaults(self, mock_monotonic_ns): + """ + create an instance with default values + """ + expected_timestamp = 123456789 + mock_monotonic_ns.return_value = expected_timestamp + mock_type = mock.Mock() + metric = self._make_one(mock_type) + assert metric.op_type == mock_type + assert metric.start_time_ns == expected_timestamp + assert metric.active_attempt is None + assert metric.cluster_id is None + assert metric.zone is None + assert len(metric.completed_attempts) == 0 + assert len(metric.handlers) == 0 + assert metric.is_streaming is False + assert metric.flow_throttling_time_ns == 0 + assert metric.state == State.CREATED + + def test_ctor_explicit(self): + """ + test with explicit arguments + """ + expected_type = mock.Mock() + expected_start_time_ns = 7 + expected_active_attempt = mock.Mock() + expected_cluster_id = "cluster" + expected_zone = "zone" + expected_completed_attempts = [mock.Mock()] + expected_state = State.COMPLETED + expected_handlers = [mock.Mock()] + expected_is_streaming = True + expected_flow_throttling = 12 + metric = self._make_one( + op_type=expected_type, + start_time_ns=expected_start_time_ns, + active_attempt=expected_active_attempt, + cluster_id=expected_cluster_id, + zone=expected_zone, + state=expected_state, + completed_attempts=expected_completed_attempts, + handlers=expected_handlers, + is_streaming=expected_is_streaming, + flow_throttling_time_ns=expected_flow_throttling, + ) + assert metric.op_type == expected_type + assert metric.start_time_ns == expected_start_time_ns + assert metric.active_attempt == expected_active_attempt + assert metric.cluster_id == expected_cluster_id + assert metric.zone == expected_zone + assert metric.completed_attempts == expected_completed_attempts + assert metric.state == expected_state + assert metric.handlers == expected_handlers + assert metric.is_streaming == expected_is_streaming + assert metric.flow_throttling_time_ns == expected_flow_throttling + + def test_state_machine_w_methods(self): + """ + Exercise the state machine by calling methods to move between states + """ + metric = self._make_one(mock.Mock()) + assert metric.state == State.CREATED + metric.start() + assert metric.state == State.CREATED + metric.start_attempt() + assert metric.state == State.ACTIVE_ATTEMPT + metric.end_attempt_with_status(Exception()) + assert metric.state == State.BETWEEN_ATTEMPTS + metric.start_attempt() + assert metric.state == State.ACTIVE_ATTEMPT + metric.end_with_success() + assert metric.state == State.COMPLETED + + def test_state_machine(self): + """ + Exercise state machine by moving through states + """ + metric = self._make_one(mock.Mock()) + assert metric.state == State.CREATED + metric.start_attempt() + assert metric.state == State.ACTIVE_ATTEMPT + metric.end_attempt_with_status(0) + assert metric.state == State.BETWEEN_ATTEMPTS + metric.end_with_success() + assert metric.state == State.COMPLETED + + @pytest.mark.parametrize( + "method,args,valid_states,error_method_name", + [ + ("start", (), (State.CREATED,), None), + ("start_attempt", (), (State.CREATED, State.BETWEEN_ATTEMPTS), None), + 
("add_response_metadata", ({},), (State.ACTIVE_ATTEMPT,), None), + ("end_attempt_with_status", (mock.Mock(),), (State.ACTIVE_ATTEMPT,), None), + ( + "end_with_status", + (mock.Mock(),), + ( + State.CREATED, + State.ACTIVE_ATTEMPT, + State.BETWEEN_ATTEMPTS, + ), + None, + ), + ( + "end_with_success", + (), + ( + State.CREATED, + State.ACTIVE_ATTEMPT, + State.BETWEEN_ATTEMPTS, + ), + "end_with_status", + ), + ], + ids=lambda x: x if isinstance(x, str) else "", + ) + def test_error_invalid_states(self, method, args, valid_states, error_method_name): + """ + each method only works for certain states. Make sure _handle_error is called for invalid states + """ + cls = type(self._make_one(mock.Mock())) + invalid_states = set(State) - set(valid_states) + error_method_name = error_method_name or method + for state in invalid_states: + with mock.patch.object(cls, "_handle_error") as mock_handle_error: + mock_handle_error.return_value = None + metric = self._make_one(mock.Mock(), state=state) + return_obj = getattr(metric, method)(*args) + assert return_obj is None + assert mock_handle_error.call_count == 1 + assert ( + mock_handle_error.call_args[0][0] + == f"Invalid state for {error_method_name}: {state}" + ) + + @mock.patch("time.monotonic_ns") + def test_start(self, mock_monotonic_ns): + """ + calling start op operation should reset start_time + """ + expected_timestamp = 123456789 + mock_monotonic_ns.return_value = expected_timestamp + orig_time = 0 + metric = self._make_one(mock.Mock(), start_time_ns=orig_time) + assert metric.start_time_ns == 0 + metric.start() + assert metric.start_time_ns != orig_time + assert metric.start_time_ns == expected_timestamp + # should remain in CREATED state after completing + assert metric.state == State.CREATED + + @mock.patch("time.monotonic_ns") + def test_start_attempt(self, mock_monotonic_ns): + """ + calling start_attempt should create a new emptu atempt metric + """ + from google.cloud.bigtable.data._metrics.data_model import ActiveAttemptMetric + + expected_timestamp = 123456789 + mock_monotonic_ns.return_value = expected_timestamp + metric = self._make_one(mock.Mock()) + assert metric.active_attempt is None + metric.start_attempt() + assert isinstance(metric.active_attempt, ActiveAttemptMetric) + # make sure it was initialized with the correct values + assert metric.active_attempt.start_time_ns == expected_timestamp + assert metric.active_attempt.gfe_latency_ns is None + # should be in ACTIVE_ATTEMPT state after completing + assert metric.state == State.ACTIVE_ATTEMPT + + def test_start_attempt_with_backoff_generator(self): + """ + If operation has a backoff generator, it should be used to attach backoff + times to attempts + """ + from google.cloud.bigtable.data._helpers import TrackedBackoffGenerator + + generator = TrackedBackoffGenerator() + # pre-seed generator with exepcted values + generator.history = list(range(10)) + metric = self._make_one(mock.Mock(), backoff_generator=generator) + metric.start_attempt() + assert len(metric.completed_attempts) == 0 + # first attempt should always be 0 + assert metric.active_attempt.backoff_before_attempt_ns == 0 + # later attempts should have their attempt number as backoff time + for i in range(10): + metric.end_attempt_with_status(mock.Mock()) + assert len(metric.completed_attempts) == i + 1 + metric.start_attempt() + # expect the backoff to be converted froms seconds to ns + assert metric.active_attempt.backoff_before_attempt_ns == (i * 1e9) + + @pytest.mark.parametrize( + 
"start_cluster,start_zone,metadata_proto,end_cluster,end_zone", + [ + (None, None, None, None, None), + ("orig_cluster", "orig_zone", None, "orig_cluster", "orig_zone"), + (None, None, ResponseParams(), None, None), + ( + "orig_cluster", + "orig_zone", + ResponseParams(), + "orig_cluster", + "orig_zone", + ), + ( + None, + None, + ResponseParams(cluster_id="test-cluster", zone_id="us-central1-b"), + "test-cluster", + "us-central1-b", + ), + ( + None, + "filled", + ResponseParams(cluster_id="cluster", zone_id="zone"), + "cluster", + "zone", + ), + (None, "filled", ResponseParams(cluster_id="cluster"), "cluster", "filled"), + (None, "filled", ResponseParams(zone_id="zone"), None, "zone"), + ( + "filled", + None, + ResponseParams(cluster_id="cluster", zone_id="zone"), + "cluster", + "zone", + ), + ("filled", None, ResponseParams(cluster_id="cluster"), "cluster", None), + ("filled", None, ResponseParams(zone_id="zone"), "filled", "zone"), + ], + ) + def test_add_response_metadata_cbt_header( + self, start_cluster, start_zone, metadata_proto, end_cluster, end_zone + ): + """ + calling add_response_metadata should update fields based on grpc response metadata + The x-goog-ext-425905942-bin field contains cluster and zone info + """ + import grpc + + cls = type(self._make_one(mock.Mock())) + with mock.patch.object(cls, "_handle_error") as mock_handle_error: + metric = self._make_one( + mock.Mock(), + cluster_id=start_cluster, + zone=start_zone, + state=State.ACTIVE_ATTEMPT, + ) + metric.active_attempt = mock.Mock() + metric.active_attempt.gfe_latency_ns = None + metadata = grpc.aio.Metadata() + if metadata_proto is not None: + metadata["x-goog-ext-425905942-bin"] = ResponseParams.serialize( + metadata_proto + ) + metric.add_response_metadata(metadata) + assert metric.cluster_id == end_cluster + assert metric.zone == end_zone + # should remain in ACTIVE_ATTEMPT state after completing + assert metric.state == State.ACTIVE_ATTEMPT + # no errors encountered + assert mock_handle_error.call_count == 0 + # gfe latency should not be touched + assert metric.active_attempt.gfe_latency_ns is None + + @pytest.mark.parametrize( + "metadata_field", + [ + b"bad-input", + "cluster zone", # expect bytes + ], + ) + def test_add_response_metadata_cbt_header_w_error(self, metadata_field): + """ + If the x-goog-ext-425905942-bin field is present, but not structured properly, + _handle_error should be called + + Extra fields should not result in parsingerror + """ + import grpc + + cls = type(self._make_one(mock.Mock())) + with mock.patch.object(cls, "_handle_error") as mock_handle_error: + metric = self._make_one(mock.Mock(), state=State.ACTIVE_ATTEMPT) + metric.cluster_id = None + metric.zone = None + metric.active_attempt = mock.Mock() + metadata = grpc.aio.Metadata() + metadata["x-goog-ext-425905942-bin"] = metadata_field + metric.add_response_metadata(metadata) + # should remain in ACTIVE_ATTEMPT state after completing + assert metric.state == State.ACTIVE_ATTEMPT + # no errors encountered + assert mock_handle_error.call_count == 1 + assert ( + "Failed to decode x-goog-ext-425905942-bin metadata:" + in mock_handle_error.call_args[0][0] + ) + assert str(metadata_field) in mock_handle_error.call_args[0][0] + + @pytest.mark.parametrize( + "metadata_field,expected_latency_ns", + [ + (None, None), + ("gfet4t7; dur=1000", 1000e6), + ("gfet4t7; dur=1000.0", 1000e6), + ("gfet4t7; dur=1000.1", 1000.1e6), + ("gcp; dur=15, gfet4t7; dur=300", 300e6), + ("gfet4t7;dur=350,gcp;dur=12", 350e6), + 
("ignore_megfet4t7;dur=90ignore_me", 90e6), + ("gfet4t7;dur=2000", 2000e6), + ("gfet4t7; dur=0.001", 1000), + ("gfet4t7; dur=0.000001", 1), + ("gfet4t7; dur=0.0000001", 0), # below recording resolution + ("gfet4t7; dur=0", 0), + ("gfet4t7; dur=empty", None), + ("gfet4t7;", None), + ("", None), + ], + ) + def test_add_response_metadata_server_timing_header( + self, metadata_field, expected_latency_ns + ): + """ + calling add_response_metadata should update fields based on grpc response metadata + The server-timing field contains gfle latency info + """ + import grpc + + cls = type(self._make_one(mock.Mock())) + with mock.patch.object(cls, "_handle_error") as mock_handle_error: + metric = self._make_one(mock.Mock(), state=State.ACTIVE_ATTEMPT) + metric.active_attempt = mock.Mock() + metric.active_attempt.gfe_latency_ns = None + metadata = grpc.aio.Metadata() + if metadata_field: + metadata["server-timing"] = metadata_field + metric.add_response_metadata(metadata) + if metric.active_attempt.gfe_latency_ns is None: + assert expected_latency_ns is None + else: + assert metric.active_attempt.gfe_latency_ns == int(expected_latency_ns) + # should remain in ACTIVE_ATTEMPT state after completing + assert metric.state == State.ACTIVE_ATTEMPT + # no errors encountered + assert mock_handle_error.call_count == 0 + # cluster and zone should not be touched + assert metric.cluster_id is None + assert metric.zone is None + + @mock.patch("time.monotonic_ns") + def test_end_attempt_with_status(self, mock_monotonic_ns): + """ + ending the attempt should: + - add one to completed_attempts + - reset active_attempt to None + - update state + - notify handlers + """ + expected_mock_time = 123456789 + mock_monotonic_ns.return_value = expected_mock_time + expected_start_time = 1 + expected_status = object() + expected_gfe_latency_ns = 5 + expected_app_blocking = 12 + expected_backoff = 2 + handlers = [mock.Mock(), mock.Mock()] + + metric = self._make_one(mock.Mock(), handlers=handlers) + assert metric.active_attempt is None + assert len(metric.completed_attempts) == 0 + metric.start_attempt() + metric.active_attempt.start_time_ns = expected_start_time + metric.active_attempt.gfe_latency_ns = expected_gfe_latency_ns + metric.active_attempt.application_blocking_time_ns = expected_app_blocking + metric.active_attempt.backoff_before_attempt_ns = expected_backoff + metric.end_attempt_with_status(expected_status) + assert len(metric.completed_attempts) == 1 + got_attempt = metric.completed_attempts[0] + expected_duration = expected_mock_time - expected_start_time + assert got_attempt.duration_ns == expected_duration + assert got_attempt.end_status == expected_status + assert got_attempt.gfe_latency_ns == expected_gfe_latency_ns + assert got_attempt.application_blocking_time_ns == expected_app_blocking + assert got_attempt.backoff_before_attempt_ns == expected_backoff + # state should be changed to BETWEEN_ATTEMPTS + assert metric.state == State.BETWEEN_ATTEMPTS + # check handlers + for h in handlers: + assert h.on_attempt_complete.call_count == 1 + assert h.on_attempt_complete.call_args[0][0] == got_attempt + assert h.on_attempt_complete.call_args[0][1] == metric + + def test_end_attempt_with_status_w_exception(self): + """ + exception inputs should be converted to grpc status objects + """ + input_status = ValueError("test") + expected_status = object() + + metric = self._make_one(mock.Mock()) + metric.start_attempt() + with mock.patch.object( + metric, "_exc_to_status", return_value=expected_status + ) as 
mock_exc_to_status: + metric.end_attempt_with_status(input_status) + assert mock_exc_to_status.call_count == 1 + assert mock_exc_to_status.call_args[0][0] == input_status + assert metric.completed_attempts[0].end_status == expected_status + + @mock.patch("time.monotonic_ns") + def test_end_attempt_with_negative_duration_ns(self, mock_monotonic_ns): + """ + If duration_ns is negative, it should be set to 0 and _handle_error should be called + """ + cls = type(self._make_one(mock.Mock())) + with mock.patch.object(cls, "_handle_error") as mock_handle_error: + metric = self._make_one(mock.Mock()) + metric.start_attempt() + metric.active_attempt.start_time_ns = 100 + mock_monotonic_ns.return_value = 50 # Simulate time going backwards + metric.end_attempt_with_status(mock.Mock()) + + assert mock_handle_error.call_count == 1 + assert ( + "received negative value for duration" + in mock_handle_error.call_args[0][0] + ) + assert metric.completed_attempts[0].duration_ns == 0 + + @mock.patch("time.monotonic_ns") + def test_end_with_status(self, mock_monotonic_ns): + """ + ending the operation should: + - end active attempt + - mark operation as completed + - update handlers + """ + from google.cloud.bigtable.data._metrics.data_model import ActiveAttemptMetric + + expected_mock_time = 123456789 + mock_monotonic_ns.return_value = expected_mock_time + expected_attempt_start_time = 0 + expected_attempt_gfe_latency_ns = 5 + expected_flow_time = 16 + + expected_first_response_latency_ns = 9 + expected_status = object() + expected_type = object() + expected_start_time = 1 + expected_cluster = object() + expected_zone = object() + is_streaming = object() + + handlers = [mock.Mock(), mock.Mock()] + metric = self._make_one( + expected_type, + handlers=handlers, + start_time_ns=expected_start_time, + state=State.ACTIVE_ATTEMPT, + ) + metric.cluster_id = expected_cluster + metric.zone = expected_zone + metric.is_streaming = is_streaming + metric.flow_throttling_time_ns = expected_flow_time + metric.first_response_latency_ns = expected_first_response_latency_ns + attempt = ActiveAttemptMetric( + start_time_ns=expected_attempt_start_time, + gfe_latency_ns=expected_attempt_gfe_latency_ns, + ) + metric.active_attempt = attempt + metric.end_with_status(expected_status) + # test that ActiveOperation was updated to terminal state + assert metric.state == State.COMPLETED + assert metric.active_attempt is None + assert len(metric.completed_attempts) == 1 + # check that finalized operation was passed to handlers + for h in handlers: + assert h.on_operation_complete.call_count == 1 + assert len(h.on_operation_complete.call_args[0]) == 1 + called_with = h.on_operation_complete.call_args[0][0] + assert called_with.op_type == expected_type + expected_duration = expected_mock_time - expected_start_time + assert called_with.duration_ns == expected_duration + assert called_with.final_status == expected_status + assert called_with.cluster_id == expected_cluster + assert called_with.zone == expected_zone + assert called_with.is_streaming == is_streaming + assert called_with.flow_throttling_time_ns == expected_flow_time + assert ( + called_with.first_response_latency_ns + == expected_first_response_latency_ns + ) + # check the attempt + assert len(called_with.completed_attempts) == 1 + final_attempt = called_with.completed_attempts[0] + assert final_attempt.gfe_latency_ns == expected_attempt_gfe_latency_ns + assert final_attempt.end_status == expected_status + expected_duration = expected_mock_time - expected_attempt_start_time + 
assert final_attempt.duration_ns == expected_duration + + @mock.patch("time.monotonic_ns") + def test_end_with_negative_duration_ns(self, mock_monotonic_ns): + """ + If operation duration_ns is negative, it should be set to 0 and _handle_error should be called + """ + cls = type(self._make_one(mock.Mock())) + with mock.patch.object(cls, "_handle_error") as mock_handle_error: + metric = self._make_one(mock.Mock(), handlers=[mock.Mock()]) + metric.start_time_ns = 100 + mock_monotonic_ns.return_value = 50 # Simulate time going backwards + metric.end_with_status(mock.Mock()) + + assert mock_handle_error.call_count == 1 + assert ( + "received negative value for duration" + in mock_handle_error.call_args[0][0] + ) + final_op = metric.handlers[0].on_operation_complete.call_args[0][0] + assert final_op.duration_ns == 0 + + def test_end_with_status_w_exception(self): + """ + exception inputs should be converted to grpc status objects + """ + input_status = ValueError("test") + expected_status = object() + handlers = [mock.Mock()] + + metric = self._make_one(mock.Mock(), handlers=handlers) + metric.start_attempt() + with mock.patch.object( + metric, "_exc_to_status", return_value=expected_status + ) as mock_exc_to_status: + metric.end_with_status(input_status) + assert mock_exc_to_status.call_count == 1 + assert mock_exc_to_status.call_args[0][0] == input_status + assert metric.completed_attempts[0].end_status == expected_status + final_op = handlers[0].on_operation_complete.call_args[0][0] + assert final_op.final_status == expected_status + + def test_end_with_status_with_default_cluster_zone(self): + """ + ending the operation should use default cluster and zone if not set + """ + from google.cloud.bigtable.data._metrics.data_model import ( + DEFAULT_CLUSTER_ID, + DEFAULT_ZONE, + ) + + handlers = [mock.Mock()] + metric = self._make_one(mock.Mock(), handlers=handlers) + assert metric.cluster_id is None + assert metric.zone is None + metric.end_with_status(mock.Mock()) + assert metric.state == State.COMPLETED + # check that finalized operation was passed to handlers + for h in handlers: + assert h.on_operation_complete.call_count == 1 + called_with = h.on_operation_complete.call_args[0][0] + assert called_with.cluster_id == DEFAULT_CLUSTER_ID + assert called_with.zone == DEFAULT_ZONE + + def test_end_with_success(self): + """ + end with success should be a pass-through helper for end_with_status + """ + from grpc import StatusCode + + inner_result = object() + + metric = self._make_one(mock.Mock()) + with mock.patch.object(metric, "end_with_status") as mock_end_with_status: + mock_end_with_status.return_value = inner_result + got_result = metric.end_with_success() + assert mock_end_with_status.call_count == 1 + assert mock_end_with_status.call_args[0][0] == StatusCode.OK + assert got_result is inner_result + + def test_end_on_empty_operation(self): + """ + Should be able to end an operation without any attempts + """ + from grpc import StatusCode + + handlers = [mock.Mock()] + metric = self._make_one(mock.Mock(), handlers=handlers) + metric.end_with_success() + assert metric.state == State.COMPLETED + final_op = handlers[0].on_operation_complete.call_args[0][0] + assert final_op.final_status == StatusCode.OK + assert final_op.completed_attempts == [] + + def test__exc_to_status(self): + """ + Should return grpc_status_code if grpc error, otherwise UNKNOWN + + If BigtableExceptionGroup, use the most recent exception in the group + """ + from grpc import StatusCode + from google.api_core import 
exceptions as core_exc + from google.cloud.bigtable.data import exceptions as bt_exc + + cls = type(self._make_one(object())) + # unknown for non-grpc errors + assert cls._exc_to_status(ValueError()) == StatusCode.UNKNOWN + assert cls._exc_to_status(RuntimeError()) == StatusCode.UNKNOWN + # grpc status code for grpc errors + assert ( + cls._exc_to_status(core_exc.InvalidArgument("msg")) + == StatusCode.INVALID_ARGUMENT + ) + assert cls._exc_to_status(core_exc.NotFound("msg")) == StatusCode.NOT_FOUND + assert ( + cls._exc_to_status(core_exc.AlreadyExists("msg")) + == StatusCode.ALREADY_EXISTS + ) + assert ( + cls._exc_to_status(core_exc.PermissionDenied("msg")) + == StatusCode.PERMISSION_DENIED + ) + cause_exc = core_exc.AlreadyExists("msg") + w_cause = core_exc.DeadlineExceeded("msg") + w_cause.__cause__ = cause_exc + assert cls._exc_to_status(w_cause) == StatusCode.DEADLINE_EXCEEDED + # use cause if available + w_cause = ValueError("msg") + w_cause.__cause__ = cause_exc + cause_exc.grpc_status_code = object() + custom_excs = [ + bt_exc.FailedMutationEntryError(1, mock.Mock(), cause=cause_exc), + bt_exc.FailedQueryShardError(1, {}, cause=cause_exc), + w_cause, + ] + for exc in custom_excs: + assert cls._exc_to_status(exc) == cause_exc.grpc_status_code, exc + # extract most recent exception for bigtable exception groups + exc_groups = [ + bt_exc._BigtableExceptionGroup("", [ValueError(), cause_exc]), + bt_exc.RetryExceptionGroup([RuntimeError(), cause_exc]), + bt_exc.ShardedReadRowsExceptionGroup( + [bt_exc.FailedQueryShardError(1, {}, cause=cause_exc)], [], 2 + ), + bt_exc.MutationsExceptionGroup( + [bt_exc.FailedMutationEntryError(1, mock.Mock(), cause=cause_exc)], 2 + ), + ] + for exc in exc_groups: + assert cls._exc_to_status(exc) == cause_exc.grpc_status_code, exc + + def test__handle_error(self): + """ + handle_error should write log + """ + input_message = "test message" + expected_message = f"Error in Bigtable Metrics: {input_message}" + with mock.patch( + "google.cloud.bigtable.data._metrics.data_model.LOGGER" + ) as logger_mock: + type(self._make_one(object()))._handle_error(input_message) + assert logger_mock.warning.call_count == 1 + assert logger_mock.warning.call_args[0][0] == expected_message + assert len(logger_mock.warning.call_args[0]) == 1 + + @pytest.mark.asyncio + async def test_context_manager(self): + """ + Should implement context manager protocol + """ + metric = self._make_one(object()) + with mock.patch.object(metric, "end_with_success") as end_with_success_mock: + end_with_success_mock.side_effect = lambda: metric.end_with_status(object()) + with metric as context: + assert context == metric + # inside context manager, still active + assert end_with_success_mock.call_count == 0 + assert metric.state == State.CREATED + # outside context manager, should be ended + assert end_with_success_mock.call_count == 1 + assert metric.state == State.COMPLETED + + @pytest.mark.asyncio + async def test_context_manager_exception(self): + """ + Exception within context manager causes end_with_status to be called with error + """ + expected_exc = ValueError("expected") + metric = self._make_one(object()) + with mock.patch.object(metric, "end_with_status") as end_with_status_mock: + try: + with metric: + # inside context manager, still active + assert end_with_status_mock.call_count == 0 + assert metric.state == State.CREATED + raise expected_exc + except ValueError as e: + assert e == expected_exc + # outside context manager, should be ended + assert 
end_with_status_mock.call_count == 1
+            assert end_with_status_mock.call_args[0][0] == expected_exc
diff --git a/tests/unit/data/_metrics/test_metrics_controller.py b/tests/unit/data/_metrics/test_metrics_controller.py
new file mode 100644
index 000000000..125c2be1c
--- /dev/null
+++ b/tests/unit/data/_metrics/test_metrics_controller.py
@@ -0,0 +1,96 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+
+class TestBigtableClientSideMetricsController:
+    def _make_one(self, *args, **kwargs):
+        from google.cloud.bigtable.data._metrics import (
+            BigtableClientSideMetricsController,
+        )
+
+        return BigtableClientSideMetricsController(*args, **kwargs)
+
+    def test_ctor_defaults(self):
+        """
+        should create an instance with no handlers by default
+        """
+        instance = self._make_one()
+        assert len(instance.handlers) == 0
+
+    def test_ctor_custom_handlers(self):
+        """
+        if handlers are passed to init, use those instead
+        """
+        custom_handler = object()
+        custom_interceptor = object()
+        controller = self._make_one(custom_interceptor, handlers=[custom_handler])
+        assert controller.interceptor == custom_interceptor
+        assert len(controller.handlers) == 1
+        assert controller.handlers[0] is custom_handler
+
+    def test_add_handler(self):
+        """
+        New handlers should be added to list
+        """
+        controller = self._make_one(handlers=[object()])
+        initial_handler_count = len(controller.handlers)
+        new_handler = object()
+        controller.add_handler(new_handler)
+        assert len(controller.handlers) == initial_handler_count + 1
+        assert controller.handlers[-1] is new_handler
+
+    def test_create_operation_mock(self):
+        """
+        All args should be passed through, as well as the handlers
+        """
+        from google.cloud.bigtable.data._metrics import ActiveOperationMetric
+
+        controller = self._make_one(handlers=[object()])
+        arg = object()
+        kwargs = {"a": 1, "b": 2}
+        with mock.patch(
+            "google.cloud.bigtable.data._metrics.ActiveOperationMetric.__init__"
+        ) as mock_op:
+            mock_op.return_value = None
+            op = controller.create_operation(arg, **kwargs)
+            assert isinstance(op, ActiveOperationMetric)
+            assert mock_op.call_count == 1
+            mock_op.assert_called_with(arg, **kwargs, handlers=controller.handlers)
+
+    def test_create_operation(self):
+        from google.cloud.bigtable.data._metrics import ActiveOperationMetric
+
+        handler = object()
+        expected_type = object()
+        expected_is_streaming = True
+        expected_zone = object()
+        controller = self._make_one(handlers=[handler])
+        op = controller.create_operation(
+            expected_type, is_streaming=expected_is_streaming, zone=expected_zone
+        )
+        assert isinstance(op, ActiveOperationMetric)
+        assert op.op_type is expected_type
+        assert op.is_streaming is expected_is_streaming
+        assert op.zone is expected_zone
+        assert len(op.handlers) == 1
+        assert op.handlers[0] is handler
+
+    def test_close(self):
+        handlers = [mock.Mock() for _ in range(3)]
+        controller = self._make_one(handlers=handlers)
+        controller.close()
+        for handler in handlers:
+            
handler.close.assert_called_once() diff --git a/tests/unit/data/_metrics/test_tracked_retry.py b/tests/unit/data/_metrics/test_tracked_retry.py new file mode 100644 index 000000000..39713dc69 --- /dev/null +++ b/tests/unit/data/_metrics/test_tracked_retry.py @@ -0,0 +1,232 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import inspect +import mock +import sys +from grpc import StatusCode +from google.api_core import exceptions as core_exceptions +from google.api_core.retry import RetryFailureReason +import google.api_core.retry as retry_module + + +class TestTrackRetryableError: + def _call_fut(self, operation): + from google.cloud.bigtable.data._metrics.tracked_retry import ( + _track_retryable_error, + ) + + return _track_retryable_error(operation) + + def test_basic_exception(self): + """should call operation.end_attempt_with_status with the exception for basic exceptions.""" + operation = mock.Mock() + wrapper = self._call_fut(operation) + + exc = RuntimeError("test") + wrapper(exc) + + operation.end_attempt_with_status.assert_called_once_with(exc) + + def test_mutate_rows_incomplete(self): + """should call operation.end_attempt_with_status with StatusCode.OK for _MutateRowsIncomplete exceptions.""" + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + + operation = mock.Mock() + wrapper = self._call_fut(operation) + + exc = _MutateRowsIncomplete("test") + wrapper(exc) + + operation.end_attempt_with_status.assert_called_once_with(StatusCode.OK) + + def test_rpc_error_metadata(self): + """should extract and add metadata from GoogleAPICallError.""" + operation = mock.Mock() + wrapper = self._call_fut(operation) + + rpc_error = mock.Mock() + rpc_error.trailing_metadata.return_value = (("key1", "val1"),) + rpc_error.initial_metadata.return_value = (("key2", "val2"),) + + exc = core_exceptions.GoogleAPICallError("test", errors=[rpc_error]) + wrapper(exc) + + operation.add_response_metadata.assert_called_once_with( + {"key1": "val1", "key2": "val2"} + ) + operation.end_attempt_with_status.assert_called_once_with(exc) + + def test_metadata_error_ignored(self): + """should ignore errors during metadata collection.""" + operation = mock.Mock() + operation.add_response_metadata.side_effect = RuntimeError("metadata error") + wrapper = self._call_fut(operation) + + rpc_error = mock.Mock() + rpc_error.trailing_metadata.return_value = () + rpc_error.initial_metadata.return_value = () + exc = core_exceptions.GoogleAPICallError("test", errors=[rpc_error]) + + # should not raise + wrapper(exc) + + operation.end_attempt_with_status.assert_called_once_with(exc) + + +class TestTrackTerminalError: + def _call_fut(self, operation, factory): + from google.cloud.bigtable.data._metrics.tracked_retry import ( + _track_terminal_error, + ) + + return _track_terminal_error(operation, factory) + + def test_basic_pass_through(self): + """should call the exception_factory and end the operation with its result.""" + operation = mock.Mock() + factory = 
mock.Mock() + expected_exc = RuntimeError("source") + expected_cause = RuntimeError("cause") + factory.return_value = (expected_exc, expected_cause) + + wrapper = self._call_fut(operation, factory) + + exc_list = [RuntimeError("attempt1")] + reason = RetryFailureReason.TIMEOUT + timeout_val = 1.0 + + result = wrapper(exc_list, reason, timeout_val) + + assert result == (expected_exc, expected_cause) + factory.assert_called_once_with(exc_list, reason, timeout_val) + operation.end_with_status.assert_called_once_with(expected_exc) + + def test_timeout_active_attempt(self): + """should end attempt if fails on timeout.""" + from google.cloud.bigtable.data._metrics import OperationState + + operation = mock.Mock() + operation.state = OperationState.ACTIVE_ATTEMPT + factory = mock.Mock() + factory.return_value = (RuntimeError("timeout"), None) + + wrapper = self._call_fut(operation, factory) + + last_exc = RuntimeError("last attempt error") + exc_list = [last_exc] + + wrapper(exc_list, RetryFailureReason.TIMEOUT, 1.0) + + # expect call to end_attempt_with_status via the _track_retryable_error logic + operation.end_attempt_with_status.assert_called_once_with(last_exc) + operation.end_with_status.assert_called_once() + + def test_rpc_error_metadata(self): + """should extract and add metadata from GoogleAPICallError in terminal errors.""" + operation = mock.Mock() + factory = mock.Mock() + + rpc_error = mock.Mock() + rpc_error.trailing_metadata.return_value = (("k", "v"),) + rpc_error.initial_metadata.return_value = () + source_exc = core_exceptions.GoogleAPICallError("test", errors=[rpc_error]) + + factory.return_value = (source_exc, None) + + wrapper = self._call_fut(operation, factory) + wrapper([], RetryFailureReason.NON_RETRYABLE_ERROR, None) + + operation.add_response_metadata.assert_called_once_with({"k": "v"}) + operation.end_with_status.assert_called_once_with(source_exc) + + +class TestTrackedRetry: + def _call_fut(self, **kwargs): + from google.cloud.bigtable.data._metrics.tracked_retry import tracked_retry + + return tracked_retry(**kwargs) + + def test_call_args(self): + """should correctly pass arguments to the retry_fn.""" + operation = mock.Mock() + retry_fn = mock.Mock() + retry_fn.return_value = "result" + + result = self._call_fut(retry_fn=retry_fn, operation=operation, other_arg=123) + + assert result == "result" + retry_fn.assert_called_once() + call_kwargs = retry_fn.call_args[1] + + assert call_kwargs["sleep_generator"] == operation.backoff_generator + assert "on_error" in call_kwargs + assert "exception_factory" in call_kwargs + assert call_kwargs["other_arg"] == 123 + + def test_tracked_retry_wraps_components(self): + """should wrap on_error and exception_factory with tracking logic.""" + from google.cloud.bigtable.data._metrics import tracked_retry + + module = sys.modules[tracked_retry.__module__] + + with mock.patch.object(module, "_track_retryable_error") as mock_track_retry: + with mock.patch.object( + module, "_track_terminal_error" + ) as mock_track_terminal: + operation = mock.Mock() + retry_fn = mock.Mock() + custom_factory = mock.Mock() + + self._call_fut( + retry_fn=retry_fn, + operation=operation, + exception_factory=custom_factory, + arg=1, + ) + + mock_track_retry.assert_called_once_with(operation) + mock_track_terminal.assert_called_once_with(operation, custom_factory) + + retry_fn.assert_called_once_with( + sleep_generator=operation.backoff_generator, + on_error=mock_track_retry.return_value, + exception_factory=mock_track_terminal.return_value, + arg=1, + ) + 
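+    # A hedged usage sketch (inferred from the pass-through behavior verified
+    # above and the api_core cases tested below, not taken from library docs):
+    # a caller would be expected to wire tracked_retry up roughly like this,
+    # where do_rpc and is_retryable are hypothetical stand-ins:
+    #
+    #     from google.api_core.retry import retry_target
+    #
+    #     op = controller.create_operation(op_type)
+    #     result = tracked_retry(
+    #         retry_fn=retry_target,
+    #         operation=op,
+    #         target=do_rpc,
+    #         predicate=is_retryable,
+    #         timeout=60,
+    #     )
+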
+ @pytest.mark.parametrize( + "fn_name,type_verifier", + [ + ("retry_target", callable), + ("retry_target_stream", inspect.isgenerator), + ("retry_target_async", inspect.iscoroutine), + ("retry_target_stream_async", inspect.isasyncgen), + ], + ) + def test_wrapping_api_core(self, fn_name, type_verifier): + """Test building tracked retry from different supported retry functions""" + from google.cloud.bigtable.data._metrics import ActiveOperationMetric + + operation = ActiveOperationMetric("type") + fn = getattr(retry_module, fn_name) + tracked_retry = self._call_fut( + retry_fn=fn, + operation=operation, + target=mock.Mock(), + timeout=None, + predicate=lambda x: False, + ) + assert type_verifier(tracked_retry) diff --git a/tests/unit/data/_sync_autogen/__init__.py b/tests/unit/data/_sync_autogen/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/data/_sync_autogen/test__mutate_rows.py b/tests/unit/data/_sync_autogen/test__mutate_rows.py new file mode 100644 index 000000000..b198df01b --- /dev/null +++ b/tests/unit/data/_sync_autogen/test__mutate_rows.py @@ -0,0 +1,309 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +import pytest +from google.cloud.bigtable_v2.types import MutateRowsResponse +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import DeleteAllFromRow +from google.rpc import status_pb2 +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import Forbidden +from google.cloud.bigtable.data._cross_sync import CrossSync + +try: + from unittest import mock +except ImportError: + import mock + + +class TestMutateRowsOperation: + def _target_class(self): + return CrossSync._Sync_Impl._MutateRowsOperation + + def _make_one(self, *args, **kwargs): + if not args: + fake_target = CrossSync._Sync_Impl.Mock() + fake_target._request_path = {"table_name": "table"} + fake_target.app_profile_id = None + kwargs["gapic_client"] = kwargs.pop("gapic_client", mock.Mock()) + kwargs["target"] = kwargs.pop("target", fake_target) + kwargs["operation_timeout"] = kwargs.pop("operation_timeout", 5) + kwargs["attempt_timeout"] = kwargs.pop("attempt_timeout", 0.1) + kwargs["retryable_exceptions"] = kwargs.pop("retryable_exceptions", ()) + kwargs["mutation_entries"] = kwargs.pop("mutation_entries", []) + return self._target_class()(*args, **kwargs) + + def _make_mutation(self, count=1, size=1): + mutation = RowMutationEntry("k", [DeleteAllFromRow() for _ in range(count)]) + mutation.size = lambda: size + return mutation + + def _mock_stream(self, mutation_list, error_dict): + for idx, entry in enumerate(mutation_list): + code = error_dict.get(idx, 0) + yield MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=idx, status=status_pb2.Status(code=code) + ) + ] + ) + + def _make_mock_gapic(self, mutation_list, error_dict=None): + mock_fn = CrossSync._Sync_Impl.Mock() + if error_dict is None: + error_dict = {} + mock_fn.side_effect = lambda *args, **kwargs: self._mock_stream( + mutation_list, error_dict + ) + return mock_fn + + def test_ctor(self): + """test that constructor sets all the attributes correctly""" + from google.cloud.bigtable.data._async._mutate_rows import _EntryWithProto + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + from google.api_core.exceptions import DeadlineExceeded + from google.api_core.exceptions import Aborted + + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + attempt_timeout = 0.01 + retryable_exceptions = () + instance = self._make_one( + client, + table, + entries, + operation_timeout, + attempt_timeout, + retryable_exceptions, + ) + assert client.mutate_rows.call_count == 0 + instance._gapic_fn() + assert client.mutate_rows.call_count == 1 + entries_w_pb = [_EntryWithProto(e, e._to_pb()) for e in entries] + assert instance.mutations == entries_w_pb + assert next(instance.timeout_generator) == attempt_timeout + assert instance.is_retryable is not None + assert instance.is_retryable(DeadlineExceeded("")) is False + assert instance.is_retryable(Aborted("")) is False + assert instance.is_retryable(_MutateRowsIncomplete("")) is True + assert instance.is_retryable(RuntimeError("")) is False + assert instance.remaining_indices == list(range(len(entries))) + assert instance.errors == {} + + def test_ctor_too_many_entries(self): + """should raise an error if an operation is created with more than 100,000 entries""" + from google.cloud.bigtable.data._async._mutate_rows import ( + _MUTATE_ROWS_REQUEST_MUTATION_LIMIT, + ) + + assert _MUTATE_ROWS_REQUEST_MUTATION_LIMIT == 100000 + 
client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation()] * (_MUTATE_ROWS_REQUEST_MUTATION_LIMIT + 1) + operation_timeout = 0.05 + attempt_timeout = 0.01 + with pytest.raises(ValueError) as e: + self._make_one(client, table, entries, operation_timeout, attempt_timeout) + assert "mutate_rows requests can contain at most 100000 mutations" in str( + e.value + ) + assert "Found 100001" in str(e.value) + + def test_mutate_rows_operation(self): + """Test successful case of mutate_rows_operation""" + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + cls = self._target_class() + with mock.patch( + f"{cls.__module__}.{cls.__name__}._run_attempt", CrossSync._Sync_Impl.Mock() + ) as attempt_mock: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + instance.start() + assert attempt_mock.call_count == 1 + + @pytest.mark.parametrize("exc_type", [RuntimeError, ZeroDivisionError, Forbidden]) + def test_mutate_rows_attempt_exception(self, exc_type): + """exceptions raised from attempt should be raised in MutationsExceptionGroup""" + client = CrossSync._Sync_Impl.Mock() + table = mock.Mock() + table._request_path = {"table_name": "table"} + table.app_profile_id = None + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + expected_exception = exc_type("test") + client.mutate_rows.side_effect = expected_exception + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + instance._run_attempt() + except Exception as e: + found_exc = e + assert client.mutate_rows.call_count == 1 + assert type(found_exc) is exc_type + assert found_exc == expected_exception + assert len(instance.errors) == 2 + assert len(instance.remaining_indices) == 0 + + @pytest.mark.parametrize("exc_type", [RuntimeError, ZeroDivisionError, Forbidden]) + def test_mutate_rows_exception(self, exc_type): + """exceptions raised from retryable should be raised in MutationsExceptionGroup""" + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + expected_cause = exc_type("abort") + with mock.patch.object( + self._target_class(), "_run_attempt", CrossSync._Sync_Impl.Mock() + ) as attempt_mock: + attempt_mock.side_effect = expected_cause + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + instance.start() + except MutationsExceptionGroup as e: + found_exc = e + assert attempt_mock.call_count == 1 + assert len(found_exc.exceptions) == 2 + assert isinstance(found_exc.exceptions[0], FailedMutationEntryError) + assert isinstance(found_exc.exceptions[1], FailedMutationEntryError) + assert found_exc.exceptions[0].__cause__ == expected_cause + assert found_exc.exceptions[1].__cause__ == expected_cause + + @pytest.mark.parametrize("exc_type", [DeadlineExceeded, RuntimeError]) + def test_mutate_rows_exception_retryable_eventually_pass(self, exc_type): + """If an exception fails but eventually passes, it should not raise an exception""" + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation()] + operation_timeout = 1 + expected_cause = exc_type("retry") + num_retries = 2 + with mock.patch.object( 
+ self._target_class(), "_run_attempt", CrossSync._Sync_Impl.Mock() + ) as attempt_mock: + attempt_mock.side_effect = [expected_cause] * num_retries + [None] + instance = self._make_one( + client, + table, + entries, + operation_timeout, + operation_timeout, + retryable_exceptions=(exc_type,), + ) + instance.start() + assert attempt_mock.call_count == num_retries + 1 + + def test_mutate_rows_incomplete_ignored(self): + """MutateRowsIncomplete exceptions should not be added to error list""" + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.api_core.exceptions import DeadlineExceeded + + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation()] + operation_timeout = 0.05 + with mock.patch.object( + self._target_class(), "_run_attempt", CrossSync._Sync_Impl.Mock() + ) as attempt_mock: + attempt_mock.side_effect = _MutateRowsIncomplete("ignored") + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + instance.start() + except MutationsExceptionGroup as e: + found_exc = e + assert attempt_mock.call_count > 0 + assert len(found_exc.exceptions) == 1 + assert isinstance(found_exc.exceptions[0].__cause__, DeadlineExceeded) + + def test_run_attempt_single_entry_success(self): + """Test mutating a single entry""" + mutation = self._make_mutation() + expected_timeout = 1.3 + mock_gapic_fn = self._make_mock_gapic({0: mutation}) + instance = self._make_one( + mutation_entries=[mutation], attempt_timeout=expected_timeout + ) + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + instance._run_attempt() + assert len(instance.remaining_indices) == 0 + assert mock_gapic_fn.call_count == 1 + (_, kwargs) = mock_gapic_fn.call_args + assert kwargs["timeout"] == expected_timeout + request = kwargs["request"] + assert request.entries == [mutation._to_pb()] + + def test_run_attempt_empty_request(self): + """Calling with no mutations should result in no API calls""" + mock_gapic_fn = self._make_mock_gapic([]) + instance = self._make_one(mutation_entries=[]) + instance._run_attempt() + assert mock_gapic_fn.call_count == 0 + + def test_run_attempt_partial_success_retryable(self): + """Some entries succeed, but one fails. Should report the proper index, and raise incomplete exception""" + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + + success_mutation = self._make_mutation() + success_mutation_2 = self._make_mutation() + failure_mutation = self._make_mutation() + mutations = [success_mutation, failure_mutation, success_mutation_2] + mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) + instance = self._make_one(mutation_entries=mutations) + instance.is_retryable = lambda x: True + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + with pytest.raises(_MutateRowsIncomplete): + instance._run_attempt() + assert instance.remaining_indices == [1] + assert 0 not in instance.errors + assert len(instance.errors[1]) == 1 + assert instance.errors[1][0].grpc_status_code == 300 + assert 2 not in instance.errors + + def test_run_attempt_partial_success_non_retryable(self): + """Some entries succeed, but one fails. Exception marked as non-retryable. 
Do not raise incomplete error""" + success_mutation = self._make_mutation() + success_mutation_2 = self._make_mutation() + failure_mutation = self._make_mutation() + mutations = [success_mutation, failure_mutation, success_mutation_2] + mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) + instance = self._make_one(mutation_entries=mutations) + instance.is_retryable = lambda x: False + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + instance._run_attempt() + assert instance.remaining_indices == [] + assert 0 not in instance.errors + assert len(instance.errors[1]) == 1 + assert instance.errors[1][0].grpc_status_code == 300 + assert 2 not in instance.errors diff --git a/tests/unit/data/_sync_autogen/test__read_rows.py b/tests/unit/data/_sync_autogen/test__read_rows.py new file mode 100644 index 000000000..a545142d3 --- /dev/null +++ b/tests/unit/data/_sync_autogen/test__read_rows.py @@ -0,0 +1,354 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. + +import pytest +from google.cloud.bigtable.data._cross_sync import CrossSync + +try: + from unittest import mock +except ImportError: + import mock + + +class TestReadRowsOperation: + """ + Tests helper functions in the ReadRowsOperation class + in-depth merging logic in merge_row_response_stream and _read_rows_retryable_attempt + is tested in test_read_rows_acceptance test_client_read_rows, and conformance tests + """ + + @staticmethod + def _get_target_class(): + return CrossSync._Sync_Impl._ReadRowsOperation + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + from google.cloud.bigtable.data import ReadRowsQuery + + row_limit = 91 + query = ReadRowsQuery(limit=row_limit) + client = mock.Mock() + client.read_rows = mock.Mock() + client.read_rows.return_value = None + table = mock.Mock() + table._client = client + table._request_path = {"table_name": "test_table"} + table.app_profile_id = "test_profile" + expected_operation_timeout = 42 + expected_request_timeout = 44 + time_gen_mock = mock.Mock() + subpath = "_async" if CrossSync._Sync_Impl.is_async else "_sync_autogen" + with mock.patch( + f"google.cloud.bigtable.data.{subpath}._read_rows._attempt_timeout_generator", + time_gen_mock, + ): + instance = self._make_one( + query, + table, + operation_timeout=expected_operation_timeout, + attempt_timeout=expected_request_timeout, + ) + assert time_gen_mock.call_count == 1 + time_gen_mock.assert_called_once_with( + expected_request_timeout, expected_operation_timeout + ) + assert instance._last_yielded_row_key is None + assert instance._remaining_count == row_limit + assert instance.operation_timeout == expected_operation_timeout + assert client.read_rows.call_count == 0 + assert instance.request.table_name == "test_table" + assert instance.request.app_profile_id == table.app_profile_id + assert instance.request.rows_limit == row_limit + + 
@pytest.mark.parametrize( + "in_keys,last_key,expected", + [ + (["b", "c", "d"], "a", ["b", "c", "d"]), + (["a", "b", "c"], "b", ["c"]), + (["a", "b", "c"], "c", []), + (["a", "b", "c"], "d", []), + (["d", "c", "b", "a"], "b", ["d", "c"]), + ], + ) + @pytest.mark.parametrize("with_range", [True, False]) + def test_revise_request_rowset_keys_with_range( + self, in_keys, last_key, expected, with_range + ): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + from google.cloud.bigtable.data.exceptions import _RowSetComplete + + in_keys = [key.encode("utf-8") for key in in_keys] + expected = [key.encode("utf-8") for key in expected] + last_key = last_key.encode("utf-8") + if with_range: + sample_range = [RowRangePB(start_key_open=last_key)] + else: + sample_range = [] + row_set = RowSetPB(row_keys=in_keys, row_ranges=sample_range) + if not with_range and expected == []: + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, last_key) + else: + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == expected + assert revised.row_ranges == sample_range + + @pytest.mark.parametrize( + "in_ranges,last_key,expected", + [ + ( + [{"start_key_open": "b", "end_key_closed": "d"}], + "a", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "a", + [{"start_key_closed": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_open": "a", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "a", "end_key_open": "d"}], + "b", + [{"start_key_open": "b", "end_key_open": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_open": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "e", []), + ([{"start_key_closed": "b"}], "z", [{"start_key_open": "z"}]), + ([{"start_key_closed": "b"}], "a", [{"start_key_closed": "b"}]), + ( + [{"end_key_closed": "z"}], + "a", + [{"start_key_open": "a", "end_key_closed": "z"}], + ), + ( + [{"end_key_open": "z"}], + "a", + [{"start_key_open": "a", "end_key_open": "z"}], + ), + ], + ) + @pytest.mark.parametrize("with_key", [True, False]) + def test_revise_request_rowset_ranges( + self, in_ranges, last_key, expected, with_key + ): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + from google.cloud.bigtable.data.exceptions import _RowSetComplete + + next_key = (last_key + "a").encode("utf-8") + last_key = last_key.encode("utf-8") + in_ranges = [ + RowRangePB(**{k: v.encode("utf-8") for (k, v) in r.items()}) + for r in in_ranges + ] + expected = [ + RowRangePB(**{k: v.encode("utf-8") for (k, v) in r.items()}) + for r in expected + ] + if with_key: + row_keys = [next_key] + else: + row_keys = [] + row_set = RowSetPB(row_ranges=in_ranges, row_keys=row_keys) + if not with_key and expected == []: + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, last_key) + else: + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == row_keys + assert revised.row_ranges == expected + + @pytest.mark.parametrize("last_key", ["a", 
"b", "c"]) + def test_revise_request_full_table(self, last_key): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + last_key = last_key.encode("utf-8") + row_set = RowSetPB() + for selected_set in [row_set, None]: + revised = self._get_target_class()._revise_request_rowset( + selected_set, last_key + ) + assert revised.row_keys == [] + assert len(revised.row_ranges) == 1 + assert revised.row_ranges[0] == RowRangePB(start_key_open=last_key) + + def test_revise_to_empty_rowset(self): + """revising to an empty rowset should raise error""" + from google.cloud.bigtable.data.exceptions import _RowSetComplete + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + row_keys = [b"a", b"b", b"c"] + row_range = RowRangePB(end_key_open=b"c") + row_set = RowSetPB(row_keys=row_keys, row_ranges=[row_range]) + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, b"d") + + @pytest.mark.parametrize( + "start_limit,emit_num,expected_limit", + [ + (10, 0, 10), + (10, 1, 9), + (10, 10, 0), + (None, 10, None), + (None, 0, None), + (4, 2, 2), + ], + ) + def test_revise_limit(self, start_limit, emit_num, expected_limit): + """revise_limit should revise the request's limit field + - if limit is 0 (unlimited), it should never be revised + - if start_limit-emit_num == 0, the request should end early + - if the number emitted exceeds the new limit, an exception should + should be raised (tested in test_revise_limit_over_limit)""" + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable_v2.types import ReadRowsResponse + + def awaitable_stream(): + def mock_stream(): + for i in range(emit_num): + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk( + row_key=str(i).encode(), + family_name="b", + qualifier=b"c", + value=b"d", + commit_row=True, + ) + ] + ) + + return mock_stream() + + query = ReadRowsQuery(limit=start_limit) + table = mock.Mock() + table._request_path = {"table_name": "table_name"} + table.app_profile_id = "app_profile_id" + instance = self._make_one(query, table, 10, 10) + assert instance._remaining_count == start_limit + for val in instance.chunk_stream(awaitable_stream()): + pass + assert instance._remaining_count == expected_limit + + @pytest.mark.parametrize("start_limit,emit_num", [(5, 10), (3, 9), (1, 10)]) + def test_revise_limit_over_limit(self, start_limit, emit_num): + """Should raise runtime error if we get in state where emit_num > start_num + (unless start_num == 0, which represents unlimited)""" + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable_v2.types import ReadRowsResponse + from google.cloud.bigtable.data.exceptions import InvalidChunk + + def awaitable_stream(): + def mock_stream(): + for i in range(emit_num): + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk( + row_key=str(i).encode(), + family_name="b", + qualifier=b"c", + value=b"d", + commit_row=True, + ) + ] + ) + + return mock_stream() + + query = ReadRowsQuery(limit=start_limit) + table = mock.Mock() + table._request_path = {"table_name": "table_name"} + table.app_profile_id = "app_profile_id" + instance = self._make_one(query, table, 10, 10) + assert instance._remaining_count == start_limit + with pytest.raises(InvalidChunk) as e: + for val in instance.chunk_stream(awaitable_stream()): + pass + assert "emit count exceeds row limit" 
in str(e.value) + + def test_close(self): + """should be able to close a stream safely with close. + Closed generators should raise StopAsyncIteration on next yield""" + + def mock_stream(): + while True: + yield 1 + + with mock.patch.object( + self._get_target_class(), "_read_rows_attempt" + ) as mock_attempt: + instance = self._make_one(mock.Mock(), mock.Mock(), 1, 1) + wrapped_gen = mock_stream() + mock_attempt.return_value = wrapped_gen + gen = instance.start_operation() + gen.__next__() + gen.close() + with pytest.raises(CrossSync._Sync_Impl.StopIteration): + gen.__next__() + gen.close() + with pytest.raises(CrossSync._Sync_Impl.StopIteration): + wrapped_gen.__next__() + + def test_retryable_ignore_repeated_rows(self): + """Duplicate rows should cause an invalid chunk error""" + from google.cloud.bigtable.data.exceptions import InvalidChunk + from google.cloud.bigtable_v2.types import ReadRowsResponse + + row_key = b"duplicate" + + def mock_awaitable_stream(): + def mock_stream(): + while True: + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk(row_key=row_key, commit_row=True) + ] + ) + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk(row_key=row_key, commit_row=True) + ] + ) + + return mock_stream() + + instance = mock.Mock() + instance._last_yielded_row_key = None + instance._remaining_count = None + stream = self._get_target_class().chunk_stream( + instance, mock_awaitable_stream() + ) + stream.__next__() + with pytest.raises(InvalidChunk) as exc: + stream.__next__() + assert "row keys should be strictly increasing" in str(exc.value) diff --git a/tests/unit/data/_sync_autogen/test__swappable_channel.py b/tests/unit/data/_sync_autogen/test__swappable_channel.py new file mode 100644 index 000000000..04f3f61c8 --- /dev/null +++ b/tests/unit/data/_sync_autogen/test__swappable_channel.py @@ -0,0 +1,100 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +try: + from unittest import mock +except ImportError: + import mock +import pytest +from grpc import ChannelConnectivity +from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( + SwappableChannel as TargetType, +) + + +class TestSwappableChannel: + @staticmethod + def _get_target_class(): + return TargetType + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + assert instance._channel_fn == channel_fn + channel_fn.assert_called_once_with() + assert instance._channel == channel_fn.return_value + + def test_swap_channel(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + old_channel = instance._channel + new_channel = object() + result = instance.swap_channel(new_channel) + assert result == old_channel + assert instance._channel == new_channel + + def test_create_channel(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + channel_fn.reset_mock() + new_channel = instance.create_channel() + channel_fn.assert_called_once_with() + assert new_channel == channel_fn.return_value + + @pytest.mark.parametrize( + "method_name,args,kwargs", + [ + ("unary_unary", (1,), {"kw": 2}), + ("unary_stream", (3,), {"kw": 4}), + ("stream_unary", (5,), {"kw": 6}), + ("stream_stream", (7,), {"kw": 8}), + ("get_state", (), {"try_to_connect": True}), + ], + ) + def test_forwarded_methods(self, method_name, args, kwargs): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + method = getattr(instance, method_name) + result = method(*args, **kwargs) + mock_method = getattr(channel_fn.return_value, method_name) + mock_method.assert_called_once_with(*args, **kwargs) + assert result == mock_method.return_value + + @pytest.mark.parametrize( + "method_name,args,kwargs", + [ + ("channel_ready", (), {}), + ("wait_for_state_change", (ChannelConnectivity.READY,), {}), + ], + ) + def test_forwarded_async_methods(self, method_name, args, kwargs): + def dummy_coro(*a, **k): + return mock.sentinel.result + + channel = mock.Mock() + mock_method = getattr(channel, method_name) + mock_method.side_effect = dummy_coro + channel_fn = mock.Mock(return_value=channel) + instance = self._make_one(channel_fn) + method = getattr(instance, method_name) + result = method(*args, **kwargs) + mock_method.assert_called_once_with(*args, **kwargs) + assert result == mock.sentinel.result diff --git a/tests/unit/data/_sync_autogen/test_client.py b/tests/unit/data/_sync_autogen/test_client.py new file mode 100644 index 000000000..54be1f17c --- /dev/null +++ b/tests/unit/data/_sync_autogen/test_client.py @@ -0,0 +1,3118 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +from __future__ import annotations +import grpc +import asyncio +import re +import pytest +import mock +from google.cloud.bigtable.data import mutations +from google.auth.credentials import AnonymousCredentials +from google.cloud.bigtable_v2.types import ReadRowsResponse +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.api_core import exceptions as core_exceptions +from google.api_core import client_options +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data.mutations import DeleteAllFromRow +from google.cloud.bigtable.data import TABLE_DEFAULT +from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule +from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from google.cloud.bigtable.data._cross_sync import CrossSync +from tests.unit.data.execute_query.sql_helpers import ( + chunked_responses, + column, + int64_type, + int_val, + metadata, + null_val, + prepare_response, + str_type, + str_val, +) +from google.api_core import grpc_helpers +from google.cloud.bigtable.data._sync_autogen._swappable_channel import SwappableChannel +from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( + BigtableMetricsInterceptor, +) + +CrossSync._Sync_Impl.add_mapping("grpc_helpers", grpc_helpers) +CrossSync._Sync_Impl.add_mapping("SwappableChannel", SwappableChannel) +CrossSync._Sync_Impl.add_mapping("MetricsInterceptor", BigtableMetricsInterceptor) + + +@CrossSync._Sync_Impl.add_mapping_decorator("TestBigtableDataClient") +class TestBigtableDataClient: + @staticmethod + def _get_target_class(): + return CrossSync._Sync_Impl.DataClient + + @classmethod + def _make_client(cls, *args, use_emulator=True, **kwargs): + import os + + env_mask = {} + if use_emulator: + env_mask["BIGTABLE_EMULATOR_HOST"] = "localhost" + import warnings + + warnings.filterwarnings("ignore", category=RuntimeWarning) + else: + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + kwargs["project"] = kwargs.get("project", "project-id") + with mock.patch.dict(os.environ, env_mask): + return cls._get_target_class()(*args, **kwargs) + + def test_ctor(self): + expected_project = "project-id" + expected_credentials = AnonymousCredentials() + client = self._make_client( + project="project-id", credentials=expected_credentials, use_emulator=False + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert client.project == expected_project + assert not client._active_instances + assert client._channel_refresh_task is not None + assert client.transport._credentials == expected_credentials + assert isinstance( + client._metrics_interceptor, CrossSync._Sync_Impl.MetricsInterceptor + ) + client.close() + + def test_ctor_super_inits(self): + from google.cloud.client import ClientWithProject + from google.api_core import client_options as client_options_lib + + project = "project-id" + credentials = AnonymousCredentials() + client_options = {"api_endpoint": "foo.bar:1234"} + options_parsed = client_options_lib.from_dict(client_options) + with mock.patch.object( + CrossSync._Sync_Impl.GapicClient, "__init__" + ) as bigtable_client_init: + bigtable_client_init.return_value = None + with mock.patch.object( + ClientWithProject, "__init__" + ) as client_project_init: + client_project_init.return_value = None + try: + self._make_client( + 
project=project, + credentials=credentials, + client_options=options_parsed, + use_emulator=False, + ) + except AttributeError: + pass + assert bigtable_client_init.call_count == 1 + kwargs = bigtable_client_init.call_args[1] + assert kwargs["credentials"] == credentials + assert kwargs["client_options"] == options_parsed + assert client_project_init.call_count == 1 + kwargs = client_project_init.call_args[1] + assert kwargs["project"] == project + assert kwargs["credentials"] == credentials + assert kwargs["client_options"] == options_parsed + + def test_ctor_dict_options(self): + from google.api_core.client_options import ClientOptions + + client_options = {"api_endpoint": "foo.bar:1234"} + with mock.patch.object( + CrossSync._Sync_Impl.GapicClient, "__init__" + ) as bigtable_client_init: + try: + self._make_client(client_options=client_options) + except TypeError: + pass + bigtable_client_init.assert_called_once() + kwargs = bigtable_client_init.call_args[1] + called_options = kwargs["client_options"] + assert called_options.api_endpoint == "foo.bar:1234" + assert isinstance(called_options, ClientOptions) + with mock.patch.object( + self._get_target_class(), "_start_background_channel_refresh" + ) as start_background_refresh: + client = self._make_client( + client_options=client_options, use_emulator=False + ) + start_background_refresh.assert_called_once() + client.close() + + def test_veneer_grpc_headers(self): + client_component = "data-async" if CrossSync._Sync_Impl.is_async else "data" + VENEER_HEADER_REGEX = re.compile( + "gapic\\/[0-9]+\\.[\\w.-]+ gax\\/[0-9]+\\.[\\w.-]+ gccl\\/[0-9]+\\.[\\w.-]+-" + + client_component + + " gl-python\\/[0-9]+\\.[\\w.-]+ grpc\\/[0-9]+\\.[\\w.-]+" + ) + patch = mock.patch("google.api_core.gapic_v1.method.wrap_method") + with patch as gapic_mock: + client = self._make_client(project="project-id") + wrapped_call_list = gapic_mock.call_args_list + assert len(wrapped_call_list) > 0 + for call in wrapped_call_list: + client_info = call.kwargs["client_info"] + assert client_info is not None, f"{call} has no client_info" + wrapped_user_agent_sorted = " ".join( + sorted(client_info.to_user_agent().split(" ")) + ) + assert VENEER_HEADER_REGEX.match( + wrapped_user_agent_sorted + ), f"'{wrapped_user_agent_sorted}' does not match {VENEER_HEADER_REGEX}" + client.close() + + def test__start_background_channel_refresh_task_exists(self): + client = self._make_client(project="project-id", use_emulator=False) + assert client._channel_refresh_task is not None + with mock.patch.object(asyncio, "create_task") as create_task: + client._start_background_channel_refresh() + create_task.assert_not_called() + client.close() + + def test__start_background_channel_refresh(self): + client = self._make_client(project="project-id") + with mock.patch.object( + client, "_ping_and_warm_instances", CrossSync._Sync_Impl.Mock() + ) as ping_and_warm: + client._emulator_host = None + client.transport._grpc_channel = CrossSync._Sync_Impl.SwappableChannel( + mock.Mock + ) + client._start_background_channel_refresh() + assert client._channel_refresh_task is not None + assert isinstance(client._channel_refresh_task, CrossSync._Sync_Impl.Task) + CrossSync._Sync_Impl.sleep(0.1) + assert ping_and_warm.call_count == 1 + client.close() + + def test__ping_and_warm_instances(self): + """test ping and warm with mocked asyncio.gather""" + client_mock = mock.Mock() + client_mock._execute_ping_and_warms = ( + lambda *args: self._get_target_class()._execute_ping_and_warms( + client_mock, *args + ) + ) 
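+        # gather_partials is stubbed out below so no warmup RPC actually runs;
+        # the mocked channel still records how each ping request was built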
+ with mock.patch.object( + CrossSync._Sync_Impl, "gather_partials", CrossSync._Sync_Impl.Mock() + ) as gather: + gather.side_effect = lambda partials, **kwargs: [None for _ in partials] + channel = mock.Mock() + client_mock._active_instances = [] + result = self._get_target_class()._ping_and_warm_instances( + client_mock, channel=channel + ) + assert len(result) == 0 + assert gather.call_args[1]["return_exceptions"] is True + assert gather.call_args[1]["sync_executor"] == client_mock._executor + client_mock._active_instances = [(mock.Mock(), mock.Mock())] * 4 + gather.reset_mock() + channel.reset_mock() + result = self._get_target_class()._ping_and_warm_instances( + client_mock, channel=channel + ) + assert len(result) == 4 + gather.assert_called_once() + partial_list = gather.call_args.args[0] + assert len(partial_list) == 4 + grpc_call_args = channel.unary_unary().call_args_list + for idx, (_, kwargs) in enumerate(grpc_call_args): + ( + expected_instance, + expected_app_profile, + ) = client_mock._active_instances[idx] + request = kwargs["request"] + assert request["name"] == expected_instance + assert request["app_profile_id"] == expected_app_profile + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert ( + metadata[0][1] + == f"name={expected_instance}&app_profile_id={expected_app_profile}" + ) + + def test__ping_and_warm_single_instance(self): + """should be able to call ping and warm with single instance""" + client_mock = mock.Mock() + client_mock._execute_ping_and_warms = ( + lambda *args: self._get_target_class()._execute_ping_and_warms( + client_mock, *args + ) + ) + with mock.patch.object( + CrossSync._Sync_Impl, "gather_partials", CrossSync._Sync_Impl.Mock() + ) as gather: + gather.side_effect = lambda *args, **kwargs: [fn() for fn in args[0]] + client_mock._active_instances = [mock.Mock()] * 100 + test_key = ("test-instance", "test-app-profile") + result = self._get_target_class()._ping_and_warm_instances( + client_mock, test_key + ) + assert len(result) == 1 + grpc_call_args = ( + client_mock.transport.grpc_channel.unary_unary().call_args_list + ) + assert len(grpc_call_args) == 1 + kwargs = grpc_call_args[0][1] + request = kwargs["request"] + assert request["name"] == "test-instance" + assert request["app_profile_id"] == "test-app-profile" + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert ( + metadata[0][1] == "name=test-instance&app_profile_id=test-app-profile" + ) + + @pytest.mark.parametrize( + "refresh_interval, wait_time, expected_sleep", + [(0, 0, 0), (0, 1, 0), (10, 0, 10), (10, 5, 5), (10, 10, 0), (10, 15, 0)], + ) + def test__manage_channel_first_sleep( + self, refresh_interval, wait_time, expected_sleep + ): + import time + + with mock.patch.object(time, "monotonic") as monotonic: + monotonic.return_value = 0 + with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: + sleep.side_effect = asyncio.CancelledError + try: + client = self._make_client(project="project-id") + client._channel_init_time = -wait_time + client._manage_channel(refresh_interval, refresh_interval) + except asyncio.CancelledError: + pass + sleep.assert_called_once() + call_time = sleep.call_args[0][1] + assert ( + abs(call_time - expected_sleep) < 0.1 + ), f"refresh_interval: {refresh_interval}, wait_time: {wait_time}, expected_sleep: {expected_sleep}" + client.close() + + def test__manage_channel_ping_and_warm(self): + """_manage channel should 
call ping and warm internally""" + import threading + + client = self._make_client(project="project-id", use_emulator=True) + orig_channel = client.transport.grpc_channel + sleep_tuple = ( + (asyncio, "sleep") + if CrossSync._Sync_Impl.is_async + else (threading.Event, "wait") + ) + with mock.patch.object(*sleep_tuple) as sleep_mock: + sleep_mock.side_effect = [None, asyncio.CancelledError] + ping_and_warm = ( + client._ping_and_warm_instances + ) = CrossSync._Sync_Impl.Mock() + try: + client._manage_channel(10) + except asyncio.CancelledError: + pass + assert ping_and_warm.call_count == 2 + assert client.transport.grpc_channel._channel != orig_channel + called_with = [call[1]["channel"] for call in ping_and_warm.call_args_list] + assert orig_channel in called_with + assert client.transport.grpc_channel._channel in called_with + + @pytest.mark.parametrize( + "refresh_interval, num_cycles, expected_sleep", + [(None, 1, 60 * 35), (10, 10, 100), (10, 1, 10)], + ) + def test__manage_channel_sleeps(self, refresh_interval, num_cycles, expected_sleep): + import time + import random + + with mock.patch.object(random, "uniform") as uniform: + uniform.side_effect = lambda min_, max_: min_ + with mock.patch.object(time, "time") as time_mock: + time_mock.return_value = 0 + with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: + sleep.side_effect = [None for i in range(num_cycles - 1)] + [ + asyncio.CancelledError + ] + client = self._make_client(project="project-id", use_emulator=True) + with mock.patch.object( + client.transport, "create_channel", CrossSync._Sync_Impl.Mock + ): + try: + if refresh_interval is not None: + client._manage_channel( + refresh_interval, refresh_interval, grace_period=0 + ) + else: + client._manage_channel(grace_period=0) + except asyncio.CancelledError: + pass + assert sleep.call_count == num_cycles + total_sleep = sum([call[0][1] for call in sleep.call_args_list]) + assert ( + abs(total_sleep - expected_sleep) < 0.5 + ), f"refresh_interval={refresh_interval}, num_cycles={num_cycles}, expected_sleep={expected_sleep}" + client.close() + + def test__manage_channel_random(self): + import random + + with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: + with mock.patch.object(random, "uniform") as uniform: + uniform.return_value = 0 + try: + uniform.side_effect = asyncio.CancelledError + client = self._make_client(project="project-id") + except asyncio.CancelledError: + uniform.side_effect = None + uniform.reset_mock() + sleep.reset_mock() + with mock.patch.object(client.transport, "create_channel"): + min_val = 200 + max_val = 205 + uniform.side_effect = lambda min_, max_: min_ + sleep.side_effect = [None, asyncio.CancelledError] + try: + client._manage_channel(min_val, max_val, grace_period=0) + except asyncio.CancelledError: + pass + assert uniform.call_count == 2 + uniform_args = [call[0] for call in uniform.call_args_list] + for found_min, found_max in uniform_args: + assert found_min == min_val + assert found_max == max_val + + @pytest.mark.parametrize("num_cycles", [0, 1, 10, 100]) + def test__manage_channel_refresh(self, num_cycles): + expected_refresh = 0.5 + grpc_lib = grpc.aio if CrossSync._Sync_Impl.is_async else grpc + new_channel = grpc_lib.insecure_channel("localhost:8080") + create_channel_mock = mock.Mock() + create_channel_mock.return_value = new_channel + refreshable_channel = CrossSync._Sync_Impl.SwappableChannel(create_channel_mock) + with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: + sleep.side_effect = 
[None for i in range(num_cycles)] + [RuntimeError] + client = self._make_client(project="project-id") + client.transport._grpc_channel = refreshable_channel + create_channel_mock.reset_mock() + sleep.reset_mock() + try: + client._manage_channel( + refresh_interval_min=expected_refresh, + refresh_interval_max=expected_refresh, + grace_period=0, + ) + except RuntimeError: + pass + assert sleep.call_count == num_cycles + 1 + assert create_channel_mock.call_count == num_cycles + client.close() + + def test__register_instance(self): + """test instance registration""" + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: f"prefix/{b}" + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = None + client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() + table_mock = mock.Mock() + self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) + ) + assert client_mock._start_background_channel_refresh.call_count == 1 + expected_key = ("prefix/instance-1", table_mock.app_profile_id) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + client_mock._channel_refresh_task = mock.Mock() + table_mock2 = mock.Mock() + self._get_target_class()._register_instance( + client_mock, "instance-2", table_mock2.app_profile_id, id(table_mock2) + ) + assert client_mock._start_background_channel_refresh.call_count == 1 + assert ( + client_mock._ping_and_warm_instances.call_args[0][0][0] + == "prefix/instance-2" + ) + assert client_mock._ping_and_warm_instances.call_count == 1 + assert len(active_instances) == 2 + assert len(instance_owners) == 2 + expected_key2 = ("prefix/instance-2", table_mock2.app_profile_id) + assert any( + [ + expected_key2 == tuple(list(active_instances)[i]) + for i in range(len(active_instances)) + ] + ) + assert any( + [ + expected_key2 == tuple(list(instance_owners)[i]) + for i in range(len(instance_owners)) + ] + ) + + def test__register_instance_duplicate(self): + """test double instance registration. 
Should be no-op""" + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: f"prefix/{b}" + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = object() + mock_channels = [mock.Mock()] + client_mock.transport.channels = mock_channels + client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() + table_mock = mock.Mock() + expected_key = ("prefix/instance-1", table_mock.app_profile_id) + self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + assert client_mock._ping_and_warm_instances.call_count == 1 + self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + assert client_mock._ping_and_warm_instances.call_count == 1 + + @pytest.mark.parametrize( + "insert_instances,expected_active,expected_owner_keys", + [ + ([("i", None)], [("i", None)], [("i", None)]), + ([("i", "p")], [("i", "p")], [("i", "p")]), + ([("1", "p"), ("1", "p")], [("1", "p")], [("1", "p")]), + ( + [("1", "p"), ("2", "p")], + [("1", "p"), ("2", "p")], + [("1", "p"), ("2", "p")], + ), + ], + ) + def test__register_instance_state( + self, insert_instances, expected_active, expected_owner_keys + ): + """test that active_instances and instance_owners are updated as expected""" + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: b + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = None + client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() + table_mock = mock.Mock() + for instance, profile in insert_instances: + table_mock.app_profile_id = profile + self._get_target_class()._register_instance( + client_mock, instance, profile, id(table_mock) + ) + assert len(active_instances) == len(expected_active) + assert len(instance_owners) == len(expected_owner_keys) + for expected in expected_active: + assert any( + [ + expected == tuple(list(active_instances)[i]) + for i in range(len(active_instances)) + ] + ) + for expected in expected_owner_keys: + assert any( + [ + expected == tuple(list(instance_owners)[i]) + for i in range(len(instance_owners)) + ] + ) + + def test__remove_instance_registration(self): + client = self._make_client(project="project-id") + table = mock.Mock() + client._register_instance("instance-1", table.app_profile_id, id(table)) + client._register_instance("instance-2", table.app_profile_id, id(table)) + assert len(client._active_instances) == 2 + assert len(client._instance_owners.keys()) == 2 + instance_1_path = client._gapic_client.instance_path( + client.project, "instance-1" + ) + instance_1_key = (instance_1_path, table.app_profile_id) + instance_2_path = client._gapic_client.instance_path( + client.project, "instance-2" + ) + instance_2_key = (instance_2_path, table.app_profile_id) + assert len(client._instance_owners[instance_1_key]) == 1 + 
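# A minimal sketch of the ownership bookkeeping these registration tests assume:
# _instance_owners maps each (instance, app_profile) key to a set of owner ids, and
# _active_instances keeps only keys that still have at least one owner.
# unregister_sketch is hypothetical, not the client's actual method.
def unregister_sketch(instance_owners, active_instances, key, owner_id):
    owners = instance_owners.get(key)
    if owners is None or owner_id not in owners:
        return False  # unknown key or owner: report failure, change nothing
    owners.discard(owner_id)
    if not owners:
        # last owner gone: stop warming this instance, but keep the (now empty) entry
        active_instances.discard(key)
    return True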
assert list(client._instance_owners[instance_1_key])[0] == id(table) + assert len(client._instance_owners[instance_2_key]) == 1 + assert list(client._instance_owners[instance_2_key])[0] == id(table) + success = client._remove_instance_registration( + "instance-1", table.app_profile_id, id(table) + ) + assert success + assert len(client._active_instances) == 1 + assert len(client._instance_owners[instance_1_key]) == 0 + assert len(client._instance_owners[instance_2_key]) == 1 + assert client._active_instances == {instance_2_key} + success = client._remove_instance_registration("fake-key", "profile", id(table)) + assert not success + assert len(client._active_instances) == 1 + client.close() + + def test__multiple_table_registration(self): + """registering with multiple tables with the same key should + add multiple owners to instance_owners, but only keep one copy + of shared key in active_instances""" + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + with self._make_client(project="project-id") as client: + with client.get_table("instance_1", "table_1") as table_1: + instance_1_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_1_key = _WarmedInstanceKey( + instance_1_path, table_1.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 1 + assert len(client._active_instances) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + with client.get_table("instance_1", "table_2") as table_2: + assert table_2._register_instance_future is not None + table_2._register_instance_future.result() + assert len(client._instance_owners[instance_1_key]) == 2 + assert len(client._active_instances) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_1_key] + with client.get_table( + "instance_1", "table_3", app_profile_id="diff" + ) as table_3: + assert table_3._register_instance_future is not None + table_3._register_instance_future.result() + instance_3_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_3_key = _WarmedInstanceKey( + instance_3_path, table_3.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 2 + assert len(client._instance_owners[instance_3_key]) == 1 + assert len(client._active_instances) == 2 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_1_key] + assert id(table_3) in client._instance_owners[instance_3_key] + assert len(client._active_instances) == 1 + assert instance_1_key in client._active_instances + assert id(table_2) not in client._instance_owners[instance_1_key] + assert len(client._active_instances) == 0 + assert instance_1_key not in client._active_instances + assert len(client._instance_owners[instance_1_key]) == 0 + + def test__multiple_instance_registration(self): + """registering with multiple instance keys should update the key + in instance_owners and active_instances""" + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + with self._make_client(project="project-id") as client: + with client.get_table("instance_1", "table_1") as table_1: + assert table_1._register_instance_future is not None + table_1._register_instance_future.result() + with client.get_table("instance_2", "table_2") as table_2: + assert table_2._register_instance_future is not None + table_2._register_instance_future.result() + instance_1_path = client._gapic_client.instance_path( 
+ client.project, "instance_1" + ) + instance_1_key = _WarmedInstanceKey( + instance_1_path, table_1.app_profile_id + ) + instance_2_path = client._gapic_client.instance_path( + client.project, "instance_2" + ) + instance_2_key = _WarmedInstanceKey( + instance_2_path, table_2.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 1 + assert len(client._instance_owners[instance_2_key]) == 1 + assert len(client._active_instances) == 2 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_2_key] + assert len(client._active_instances) == 1 + assert instance_1_key in client._active_instances + assert len(client._instance_owners[instance_2_key]) == 0 + assert len(client._instance_owners[instance_1_key]) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + assert len(client._active_instances) == 0 + assert len(client._instance_owners[instance_1_key]) == 0 + assert len(client._instance_owners[instance_2_key]) == 0 + + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + def test_get_api_surface(self, method): + """test client.get_table and client.get_authorized_view""" + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + client = self._make_client(project="project-id") + assert not client._active_instances + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + if method == "get_table": + surface = client.get_table( + expected_instance_id, expected_table_id, expected_app_profile_id + ) + assert isinstance( + surface, CrossSync._Sync_Impl.TestTable._get_target_class() + ) + elif method == "get_authorized_view": + surface = client.get_authorized_view( + expected_instance_id, + expected_table_id, + "view_id", + expected_app_profile_id, + ) + assert isinstance( + surface, CrossSync._Sync_Impl.TestAuthorizedView._get_target_class() + ) + assert ( + surface.authorized_view_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}/authorizedViews/view_id" + ) + else: + raise TypeError(f"unexpected method: {method}") + CrossSync._Sync_Impl.yield_to_event_loop() + assert surface.table_id == expected_table_id + assert ( + surface.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert surface.instance_id == expected_instance_id + assert ( + surface.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert surface.app_profile_id == expected_app_profile_id + assert surface.client is client + instance_key = _WarmedInstanceKey(surface.instance_name, surface.app_profile_id) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(surface)} + client.close() + + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + def test_api_surface_arg_passthrough(self, method): + """All arguments passed in get_table and get_authorized_view should be sent to constructor""" + if method == "get_table": + surface_type = CrossSync._Sync_Impl.TestTable._get_target_class() + elif method == "get_authorized_view": + surface_type = CrossSync._Sync_Impl.TestAuthorizedView._get_target_class() + else: + raise TypeError(f"unexpected method: {method}") + with self._make_client(project="project-id") as client: + with mock.patch.object(surface_type, "__init__") as mock_constructor: + mock_constructor.return_value = None + assert not 
client._active_instances + expected_args = ( + "table", + "instance", + "view", + "app_profile", + 1, + "test", + {"test": 2}, + ) + expected_kwargs = {"hello": "world", "test": 2} + getattr(client, method)(*expected_args, **expected_kwargs) + mock_constructor.assert_called_once_with( + client, *expected_args, **expected_kwargs + ) + + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + def test_api_surface_context_manager(self, method): + """get_table and get_authorized_view should work as context managers""" + from functools import partial + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_project_id = "project-id" + if method == "get_table": + surface_type = CrossSync._Sync_Impl.TestTable._get_target_class() + elif method == "get_authorized_view": + surface_type = CrossSync._Sync_Impl.TestAuthorizedView._get_target_class() + else: + raise TypeError(f"unexpected method: {method}") + with mock.patch.object(surface_type, "close") as close_mock: + with self._make_client(project=expected_project_id) as client: + if method == "get_table": + fn = partial( + client.get_table, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + ) + elif method == "get_authorized_view": + fn = partial( + client.get_authorized_view, + expected_instance_id, + expected_table_id, + "view_id", + expected_app_profile_id, + ) + else: + raise TypeError(f"unexpected method: {method}") + with fn() as table: + CrossSync._Sync_Impl.yield_to_event_loop() + assert isinstance(table, surface_type) + assert table.table_id == expected_table_id + assert ( + table.table_name + == f"projects/{expected_project_id}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert table.instance_id == expected_instance_id + assert ( + table.instance_name + == f"projects/{expected_project_id}/instances/{expected_instance_id}" + ) + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey( + table.instance_name, table.app_profile_id + ) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + assert close_mock.call_count == 1 + + def test_close(self): + client = self._make_client(project="project-id", use_emulator=False) + task = client._channel_refresh_task + assert task is not None + assert not task.done() + with mock.patch.object( + client.transport, "close", CrossSync._Sync_Impl.Mock() + ) as close_mock: + client.close() + close_mock.assert_called_once() + assert task.done() + assert client._channel_refresh_task is None + + def test_close_with_timeout(self): + expected_timeout = 19 + client = self._make_client(project="project-id", use_emulator=False) + with mock.patch.object( + CrossSync._Sync_Impl, "wait", CrossSync._Sync_Impl.Mock() + ) as wait_for_mock: + client.close(timeout=expected_timeout) + wait_for_mock.assert_called_once() + assert wait_for_mock.call_args[1]["timeout"] == expected_timeout + client.close() + + def test_context_manager(self): + from functools import partial + + close_mock = CrossSync._Sync_Impl.Mock() + true_close = None + with self._make_client(project="project-id", use_emulator=False) as client: + true_close = partial(client.close) + client.close = close_mock + assert not client._channel_refresh_task.done() + assert client.project == "project-id" + assert client._active_instances == set() 
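# A minimal sketch of the context-manager contract this test relies on, assuming
# __exit__ delegates to close() so that close fires only once the with-block ends
# (hence the not-called / called-once assertions just below). ClosableSketch is
# hypothetical, not the client's actual class.
class ClosableSketch:
    def close(self):
        pass  # would cancel the refresh task and close the transport

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()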
+            close_mock.assert_not_called()
+        close_mock.assert_called_once()
+        true_close()
+
+    def test_default_universe_domain(self):
+        """When not passed, universe_domain should default to googleapis.com"""
+        with self._make_client(project="project-id", credentials=None) as client:
+            assert client.universe_domain == "googleapis.com"
+            assert client.api_endpoint == "bigtable.googleapis.com"
+
+    def test_custom_universe_domain(self):
+        """Test with a customized universe domain value and the emulator enabled"""
+        universe_domain = "test-universe.test"
+        options = client_options.ClientOptions(universe_domain=universe_domain)
+        with self._make_client(
+            project="project_id",
+            client_options=options,
+            use_emulator=True,
+            credentials=None,
+        ) as client:
+            assert client.universe_domain == universe_domain
+            assert client.api_endpoint == f"bigtable.{universe_domain}"
+
+    def test_configured_universe_domain_matches_GDU(self):
+        """Test that a configured universe domain succeeds with matching GDU credentials."""
+        universe_domain = "googleapis.com"
+        options = client_options.ClientOptions(universe_domain=universe_domain)
+        with self._make_client(
+            project="project_id", client_options=options, credentials=None
+        ) as client:
+            assert client.universe_domain == "googleapis.com"
+            assert client.api_endpoint == "bigtable.googleapis.com"
+
+    def test_credential_universe_domain_matches_GDU(self):
+        """Test that credentials carrying the GDU universe domain succeed"""
+        creds = AnonymousCredentials()
+        creds._universe_domain = "googleapis.com"
+        with self._make_client(project="project_id", credentials=creds) as client:
+            assert client.universe_domain == "googleapis.com"
+            assert client.api_endpoint == "bigtable.googleapis.com"
+
+    def test_anonymous_credential_universe_domain(self):
+        """Anonymous credentials should use the default universe domain"""
+        creds = AnonymousCredentials()
+        with self._make_client(project="project_id", credentials=creds) as client:
+            assert client.universe_domain == "googleapis.com"
+            assert client.api_endpoint == "bigtable.googleapis.com"
+
+    def test_configured_universe_domain_mismatched_credentials(self):
+        """Test that configured universe domain errors with mismatched universe
+        domain credentials."""
+        universe_domain = "test-universe.test"
+        options = client_options.ClientOptions(universe_domain=universe_domain)
+        creds = AnonymousCredentials()
+        creds._universe_domain = "different-universe"
+        with pytest.raises(ValueError) as exc:
+            self._make_client(
+                project="project_id",
+                client_options=options,
+                use_emulator=False,
+                credentials=creds,
+            )
+        err_msg = f"The configured universe domain ({universe_domain}) does not match the universe domain found in the credentials ({creds.universe_domain}). If you haven't configured the universe domain explicitly, `googleapis.com` is the default."
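# A minimal sketch of the validation this test assumes: the configured universe
# domain is compared against the credentials' domain, and a ValueError carrying the
# message built above is raised on mismatch. check_universe_domain_sketch is a
# hypothetical helper, not the client's actual code path.
def check_universe_domain_sketch(configured_domain, credential_domain):
    if configured_domain != credential_domain:
        raise ValueError(
            f"The configured universe domain ({configured_domain}) does not match "
            f"the universe domain found in the credentials ({credential_domain}). "
            "If you haven't configured the universe domain explicitly, "
            "`googleapis.com` is the default."
        )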
+ assert exc.value.args[0] == err_msg + + def test_configured_universe_domain_matches_credentials(self): + """Test that configured universe domain succeeds with matching universe + domain credentials.""" + universe_domain = "test-universe.test" + options = client_options.ClientOptions(universe_domain=universe_domain) + creds = AnonymousCredentials() + creds._universe_domain = universe_domain + with self._make_client( + project="project_id", credentials=creds, client_options=options + ) as client: + assert client.universe_domain == universe_domain + assert client.api_endpoint == f"bigtable.{universe_domain}" + + +@CrossSync._Sync_Impl.add_mapping_decorator("TestTable") +class TestTable: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @staticmethod + def _get_target_class(): + return CrossSync._Sync_Impl.Table + + def _make_one( + self, + client, + instance_id="instance", + table_id="table", + app_profile_id=None, + **kwargs, + ): + return self._get_target_class()( + client, instance_id, table_id, app_profile_id, **kwargs + ) + + def test_ctor(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + from google.cloud.bigtable.data._metrics import ( + BigtableClientSideMetricsController, + ) + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_operation_timeout = 123 + expected_attempt_timeout = 12 + expected_read_rows_operation_timeout = 1.5 + expected_read_rows_attempt_timeout = 0.5 + expected_mutate_rows_operation_timeout = 2.5 + expected_mutate_rows_attempt_timeout = 0.75 + client = self._make_client() + assert not client._active_instances + table = self._get_target_class()( + client, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + default_operation_timeout=expected_operation_timeout, + default_attempt_timeout=expected_attempt_timeout, + default_read_rows_operation_timeout=expected_read_rows_operation_timeout, + default_read_rows_attempt_timeout=expected_read_rows_attempt_timeout, + default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert table.table_id == expected_table_id + assert table.instance_id == expected_instance_id + assert ( + table.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert ( + table.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey(table.instance_name, table.app_profile_id) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + assert isinstance(table._metrics, BigtableClientSideMetricsController) + assert table.default_operation_timeout == expected_operation_timeout + assert table.default_attempt_timeout == expected_attempt_timeout + assert ( + table.default_read_rows_operation_timeout + == expected_read_rows_operation_timeout + ) + assert ( + table.default_read_rows_attempt_timeout + == expected_read_rows_attempt_timeout + ) + assert ( + table.default_mutate_rows_operation_timeout + == expected_mutate_rows_operation_timeout + ) + assert ( + table.default_mutate_rows_attempt_timeout + == expected_mutate_rows_attempt_timeout + ) + 
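# A minimal sketch of the fallback behavior the timeout assertions above pin down,
# using a hypothetical resolver (not the library's actual helper): per-call timeouts
# default to the table-level values passed to the constructor, and non-positive
# values are rejected.
def resolve_timeout_sketch(explicit_timeout, table_default):
    value = table_default if explicit_timeout is None else explicit_timeout
    if value <= 0:
        raise ValueError("operation_timeout must be greater than 0")
    return value

# e.g. resolve_timeout_sketch(None, 123) -> 123; resolve_timeout_sketch(5, 123) -> 5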
table._register_instance_future + assert table._register_instance_future.done() + assert not table._register_instance_future.cancelled() + assert table._register_instance_future.exception() is None + client.close() + + def test_ctor_defaults(self): + """should provide default timeout values and app_profile_id""" + client = self._make_client() + assert not client._active_instances + table = self._make_one(client) + CrossSync._Sync_Impl.yield_to_event_loop() + assert table.app_profile_id is None + assert table.client is client + assert table.default_operation_timeout == 60 + assert table.default_read_rows_operation_timeout == 600 + assert table.default_mutate_rows_operation_timeout == 600 + assert table.default_attempt_timeout == 20 + assert table.default_read_rows_attempt_timeout == 20 + assert table.default_mutate_rows_attempt_timeout == 60 + client.close() + + def test_ctor_invalid_timeout_values(self): + """bad timeout values should raise ValueError""" + client = self._make_client() + timeout_pairs = [ + ("default_operation_timeout", "default_attempt_timeout"), + ( + "default_read_rows_operation_timeout", + "default_read_rows_attempt_timeout", + ), + ( + "default_mutate_rows_operation_timeout", + "default_mutate_rows_attempt_timeout", + ), + ] + for operation_timeout, attempt_timeout in timeout_pairs: + with pytest.raises(ValueError) as e: + self._make_one(client, **{attempt_timeout: -1}) + assert "attempt_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._make_one(client, **{operation_timeout: -1}) + assert "operation_timeout must be greater than 0" in str(e.value) + client.close() + + @pytest.mark.parametrize( + "fn_name,fn_args,is_stream,extra_retryables", + [ + ("read_rows_stream", (ReadRowsQuery(),), True, ()), + ("read_rows", (ReadRowsQuery(),), True, ()), + ("read_row", (b"row_key",), True, ()), + ("read_rows_sharded", ([ReadRowsQuery()],), True, ()), + ("row_exists", (b"row_key",), True, ()), + ("sample_row_keys", (), False, ()), + ("mutate_row", (b"row_key", [DeleteAllFromRow()]), False, ()), + ( + "bulk_mutate_rows", + ([mutations.RowMutationEntry(b"key", [DeleteAllFromRow()])],), + False, + (_MutateRowsIncomplete,), + ), + ], + ) + @pytest.mark.parametrize( + "input_retryables,expected_retryables", + [ + ( + TABLE_DEFAULT.READ_ROWS, + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + core_exceptions.Cancelled, + ], + ), + ( + TABLE_DEFAULT.DEFAULT, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ( + TABLE_DEFAULT.MUTATE_ROWS, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ([], []), + ([4], [core_exceptions.DeadlineExceeded]), + ], + ) + def test_customizable_retryable_errors( + self, + input_retryables, + expected_retryables, + fn_name, + fn_args, + is_stream, + extra_retryables, + ): + """Test that retryable functions support user-configurable arguments, and that the configured retryables are passed + down to the gapic layer.""" + retry_fn = "retry_target" + if is_stream: + retry_fn += "_stream" + retry_fn = f"CrossSync._Sync_Impl.{retry_fn}" + with mock.patch( + f"google.cloud.bigtable.data._cross_sync.{retry_fn}" + ) as retry_fn_mock: + with self._make_client() as client: + table = client.get_table("instance-id", "table-id") + expected_predicate = expected_retryables.__contains__ + retry_fn_mock.side_effect = RuntimeError("stop early") + with mock.patch( + "google.api_core.retry.if_exception_type" + ) as 
predicate_builder_mock: + predicate_builder_mock.return_value = expected_predicate + with pytest.raises(Exception): + test_fn = table.__getattribute__(fn_name) + test_fn(*fn_args, retryable_errors=input_retryables) + predicate_builder_mock.assert_called_once_with( + *expected_retryables, *extra_retryables + ) + retry_call_args = retry_fn_mock.call_args_list[0].args + assert retry_call_args[1] is expected_predicate + + @pytest.mark.parametrize( + "fn_name,fn_args,gapic_fn", + [ + ("read_rows_stream", (ReadRowsQuery(),), "read_rows"), + ("read_rows", (ReadRowsQuery(),), "read_rows"), + ("read_row", (b"row_key",), "read_rows"), + ("read_rows_sharded", ([ReadRowsQuery()],), "read_rows"), + ("row_exists", (b"row_key",), "read_rows"), + ("sample_row_keys", (), "sample_row_keys"), + ("mutate_row", (b"row_key", [mutations.DeleteAllFromRow()]), "mutate_row"), + ( + "bulk_mutate_rows", + ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), + "mutate_rows", + ), + ("check_and_mutate_row", (b"row_key", None), "check_and_mutate_row"), + ( + "read_modify_write_row", + (b"row_key", IncrementRule("f", "q")), + "read_modify_write_row", + ), + ], + ) + @pytest.mark.parametrize("include_app_profile", [True, False]) + def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): + profile = "profile" if include_app_profile else None + client = self._make_client() + transport_mock = mock.MagicMock() + rpc_mock = CrossSync._Sync_Impl.Mock() + transport_mock._wrapped_methods.__getitem__.return_value = rpc_mock + gapic_client = client._gapic_client + gapic_client._transport = transport_mock + gapic_client._is_universe_domain_valid = True + table = self._make_one(client, app_profile_id=profile) + try: + test_fn = table.__getattribute__(fn_name) + maybe_stream = test_fn(*fn_args) + [i for i in maybe_stream] + except Exception: + pass + assert rpc_mock.call_count == 1 + kwargs = rpc_mock.call_args_list[0][1] + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + routing_str = metadata[0][1] + assert f"table_name={table.table_name}" in routing_str + if include_app_profile: + assert "app_profile_id=profile" in routing_str + else: + assert "app_profile_id=" in routing_str + + def test_close(self): + client = self._make_client() + table = self._make_one(client) + with mock.patch.object( + table._metrics, "close", mock.Mock() + ) as metric_close_mock: + with mock.patch.object( + client, "_remove_instance_registration" + ) as remove_mock: + table.close() + remove_mock.assert_called_once_with( + table.instance_id, table.app_profile_id, id(table) + ) + metric_close_mock.assert_called_once() + + +@CrossSync._Sync_Impl.add_mapping_decorator("TestAuthorizedView") +class TestAuthorizedView(CrossSync._Sync_Impl.TestTable): + """ + Inherit tests from TestTableAsync, with some modifications + """ + + @staticmethod + def _get_target_class(): + return CrossSync._Sync_Impl.AuthorizedView + + def _make_one( + self, + client, + instance_id="instance", + table_id="table", + view_id="view", + app_profile_id=None, + **kwargs, + ): + return self._get_target_class()( + client, instance_id, table_id, view_id, app_profile_id, **kwargs + ) + + def test_ctor(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + from google.cloud.bigtable.data._metrics import ( + BigtableClientSideMetricsController, + ) + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_view_id = "view_id" + 
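# The resource-name shape asserted further down in this test, assuming the standard
# Bigtable naming scheme used throughout this file; authorized_view_name_sketch is a
# hypothetical helper, not the library's API.
def authorized_view_name_sketch(project, instance, table, view):
    return (
        f"projects/{project}/instances/{instance}"
        f"/tables/{table}/authorizedViews/{view}"
    )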
expected_app_profile_id = "app-profile-id" + expected_operation_timeout = 123 + expected_attempt_timeout = 12 + expected_read_rows_operation_timeout = 1.5 + expected_read_rows_attempt_timeout = 0.5 + expected_mutate_rows_operation_timeout = 2.5 + expected_mutate_rows_attempt_timeout = 0.75 + client = self._make_client() + assert not client._active_instances + view = self._get_target_class()( + client, + expected_instance_id, + expected_table_id, + expected_view_id, + expected_app_profile_id, + default_operation_timeout=expected_operation_timeout, + default_attempt_timeout=expected_attempt_timeout, + default_read_rows_operation_timeout=expected_read_rows_operation_timeout, + default_read_rows_attempt_timeout=expected_read_rows_attempt_timeout, + default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert view.table_id == expected_table_id + assert ( + view.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert view.instance_id == expected_instance_id + assert ( + view.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert view.authorized_view_id == expected_view_id + assert ( + view.authorized_view_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}/authorizedViews/{expected_view_id}" + ) + assert view.app_profile_id == expected_app_profile_id + assert view.client is client + instance_key = _WarmedInstanceKey(view.instance_name, view.app_profile_id) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(view)} + assert isinstance(view._metrics, BigtableClientSideMetricsController) + assert view.default_operation_timeout == expected_operation_timeout + assert view.default_attempt_timeout == expected_attempt_timeout + assert ( + view.default_read_rows_operation_timeout + == expected_read_rows_operation_timeout + ) + assert ( + view.default_read_rows_attempt_timeout == expected_read_rows_attempt_timeout + ) + assert ( + view.default_mutate_rows_operation_timeout + == expected_mutate_rows_operation_timeout + ) + assert ( + view.default_mutate_rows_attempt_timeout + == expected_mutate_rows_attempt_timeout + ) + view._register_instance_future + assert view._register_instance_future.done() + assert not view._register_instance_future.cancelled() + assert view._register_instance_future.exception() is None + client.close() + + +@CrossSync._Sync_Impl.add_mapping_decorator("TestReadRows") +class TestReadRows: + """ + Tests for table.read_rows and related methods. 
+ """ + + @staticmethod + def _get_operation_class(): + return CrossSync._Sync_Impl._ReadRowsOperation + + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def _make_table(self, *args, **kwargs): + client_mock = mock.Mock() + client_mock._register_instance.side_effect = ( + lambda *args, **kwargs: CrossSync._Sync_Impl.yield_to_event_loop() + ) + client_mock._remove_instance_registration.side_effect = ( + lambda *args, **kwargs: CrossSync._Sync_Impl.yield_to_event_loop() + ) + kwargs["instance_id"] = kwargs.get( + "instance_id", args[0] if args else "instance" + ) + kwargs["table_id"] = kwargs.get( + "table_id", args[1] if len(args) > 1 else "table" + ) + client_mock._gapic_client.table_path.return_value = kwargs["table_id"] + client_mock._gapic_client.instance_path.return_value = kwargs["instance_id"] + return CrossSync._Sync_Impl.TestTable._get_target_class()( + client_mock, *args, **kwargs + ) + + def _make_stats(self): + from google.cloud.bigtable_v2.types import RequestStats + from google.cloud.bigtable_v2.types import FullReadStatsView + from google.cloud.bigtable_v2.types import ReadIterationStats + + return RequestStats( + full_read_stats_view=FullReadStatsView( + read_iteration_stats=ReadIterationStats( + rows_seen_count=1, + rows_returned_count=2, + cells_seen_count=3, + cells_returned_count=4, + ) + ) + ) + + @staticmethod + def _make_chunk(*args, **kwargs): + from google.cloud.bigtable_v2 import ReadRowsResponse + + kwargs["row_key"] = kwargs.get("row_key", b"row_key") + kwargs["family_name"] = kwargs.get("family_name", "family_name") + kwargs["qualifier"] = kwargs.get("qualifier", b"qualifier") + kwargs["value"] = kwargs.get("value", b"value") + kwargs["commit_row"] = kwargs.get("commit_row", True) + return ReadRowsResponse.CellChunk(*args, **kwargs) + + @staticmethod + def _make_gapic_stream( + chunk_list: list[ReadRowsResponse.CellChunk | Exception], sleep_time=0 + ): + from google.cloud.bigtable_v2 import ReadRowsResponse + + class mock_stream: + def __init__(self, chunk_list, sleep_time): + self.chunk_list = chunk_list + self.idx = -1 + self.sleep_time = sleep_time + + def __iter__(self): + return self + + def __next__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + if sleep_time: + CrossSync._Sync_Impl.sleep(self.sleep_time) + chunk = self.chunk_list[self.idx] + if isinstance(chunk, Exception): + raise chunk + else: + return ReadRowsResponse(chunks=[chunk]) + raise CrossSync._Sync_Impl.StopIteration + + def cancel(self): + pass + + return mock_stream(chunk_list, sleep_time) + + def execute_fn(self, table, *args, **kwargs): + return table.read_rows(*args, **kwargs) + + def test_read_rows(self): + query = ReadRowsQuery() + chunks = [ + self._make_chunk(row_key=b"test_1"), + self._make_chunk(row_key=b"test_2"), + ] + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + results = self.execute_fn(table, query, operation_timeout=3) + assert len(results) == 2 + assert results[0].row_key == b"test_1" + assert results[1].row_key == b"test_2" + + def test_read_rows_stream(self): + query = ReadRowsQuery() + chunks = [ + self._make_chunk(row_key=b"test_1"), + self._make_chunk(row_key=b"test_2"), + ] + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + 
gen = table.read_rows_stream(query, operation_timeout=3) + results = [row for row in gen] + assert len(results) == 2 + assert results[0].row_key == b"test_1" + assert results[1].row_key == b"test_2" + + @pytest.mark.parametrize("include_app_profile", [True, False]) + def test_read_rows_query_matches_request(self, include_app_profile): + from google.cloud.bigtable.data import RowRange + from google.cloud.bigtable.data.row_filters import PassAllFilter + + app_profile_id = "app_profile_id" if include_app_profile else None + with self._make_table(app_profile_id=app_profile_id) as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream([]) + row_keys = [b"test_1", "test_2"] + row_ranges = RowRange("1start", "2end") + filter_ = PassAllFilter(True) + limit = 99 + query = ReadRowsQuery( + row_keys=row_keys, + row_ranges=row_ranges, + row_filter=filter_, + limit=limit, + ) + results = table.read_rows(query, operation_timeout=3) + assert len(results) == 0 + call_request = read_rows.call_args_list[0][0][0] + query_pb = query._to_pb(table) + assert call_request == query_pb + + @pytest.mark.parametrize("operation_timeout", [0.001, 0.023, 0.1]) + def test_read_rows_timeout(self, operation_timeout): + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + query = ReadRowsQuery() + chunks = [self._make_chunk(row_key=b"test_1")] + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks, sleep_time=0.15 + ) + try: + table.read_rows(query, operation_timeout=operation_timeout) + except core_exceptions.DeadlineExceeded as e: + assert ( + e.message + == f"operation_timeout of {operation_timeout:0.1f}s exceeded" + ) + + @pytest.mark.parametrize( + "per_request_t, operation_t, expected_num", [(0.1, 0.19, 2), (0.1, 0.29, 3)] + ) + def test_read_rows_attempt_timeout(self, per_request_t, operation_t, expected_num): + """Ensures that the attempt_timeout is respected and that the number of + requests is as expected. 
+ + operation_timeout does not cancel the request, so we expect the number of + requests to be the ceiling of operation_timeout / attempt_timeout.""" + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + expected_last_timeout = operation_t - (expected_num - 1) * per_request_t + with mock.patch("random.uniform", side_effect=lambda a, b: 0): + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks, sleep_time=per_request_t + ) + query = ReadRowsQuery() + chunks = [core_exceptions.DeadlineExceeded("mock deadline")] + try: + table.read_rows( + query, + operation_timeout=operation_t, + attempt_timeout=per_request_t, + ) + except core_exceptions.DeadlineExceeded as e: + retry_exc = e.__cause__ + if expected_num == 0: + assert retry_exc is None + else: + assert type(retry_exc) is RetryExceptionGroup + assert f"{expected_num} failed attempts" in str(retry_exc) + assert len(retry_exc.exceptions) == expected_num + for sub_exc in retry_exc.exceptions: + assert sub_exc.message == "mock deadline" + assert read_rows.call_count == expected_num + for _, call_kwargs in read_rows.call_args_list[:-1]: + assert call_kwargs["timeout"] == per_request_t + assert call_kwargs["retry"] is None + assert ( + abs( + read_rows.call_args_list[-1][1]["timeout"] + - expected_last_timeout + ) + < 0.05 + ) + + @pytest.mark.parametrize( + "exc_type", + [ + core_exceptions.Aborted, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + def test_read_rows_retryable_error(self, exc_type): + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + [expected_error] + ) + query = ReadRowsQuery() + expected_error = exc_type("mock error") + try: + table.read_rows(query, operation_timeout=0.1) + except core_exceptions.DeadlineExceeded as e: + retry_exc = e.__cause__ + root_cause = retry_exc.exceptions[0] + assert type(root_cause) is exc_type + assert root_cause == expected_error + + @pytest.mark.parametrize( + "exc_type", + [ + core_exceptions.PreconditionFailed, + core_exceptions.NotFound, + core_exceptions.PermissionDenied, + core_exceptions.Conflict, + core_exceptions.InternalServerError, + core_exceptions.TooManyRequests, + core_exceptions.ResourceExhausted, + InvalidChunk, + ], + ) + def test_read_rows_non_retryable_error(self, exc_type): + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + [expected_error] + ) + query = ReadRowsQuery() + expected_error = exc_type("mock error") + try: + table.read_rows(query, operation_timeout=0.1) + except exc_type as e: + assert e == expected_error + + def test_read_rows_revise_request(self): + """Ensure that _revise_request is called between retries""" + from google.cloud.bigtable.data.exceptions import InvalidChunk + from google.cloud.bigtable_v2.types import RowSet + + return_val = RowSet() + with mock.patch.object( + self._get_operation_class(), "_revise_request_rowset" + ) as revise_rowset: + revise_rowset.return_value = return_val + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + row_keys = [b"test_1", b"test_2", b"test_3"] + query = ReadRowsQuery(row_keys=row_keys) + chunks = [ + 
self._make_chunk(row_key=b"test_1"), + core_exceptions.Aborted("mock retryable error"), + ] + try: + table.read_rows(query) + except InvalidChunk: + revise_rowset.assert_called() + first_call_kwargs = revise_rowset.call_args_list[0].kwargs + assert first_call_kwargs["row_set"] == query._to_pb(table).rows + assert first_call_kwargs["last_seen_row_key"] == b"test_1" + revised_call = read_rows.call_args_list[1].args[0] + assert revised_call.rows == return_val + + def test_read_rows_default_timeouts(self): + """Ensure that the default timeouts are set on the read rows operation when not overridden""" + operation_timeout = 8 + attempt_timeout = 4 + with mock.patch.object(self._get_operation_class(), "__init__") as mock_op: + mock_op.side_effect = RuntimeError("mock error") + with self._make_table( + default_read_rows_operation_timeout=operation_timeout, + default_read_rows_attempt_timeout=attempt_timeout, + ) as table: + try: + table.read_rows(ReadRowsQuery()) + except RuntimeError: + pass + kwargs = mock_op.call_args_list[0].kwargs + assert kwargs["operation_timeout"] == operation_timeout + assert kwargs["attempt_timeout"] == attempt_timeout + + def test_read_rows_default_timeout_override(self): + """When timeouts are passed, they overwrite default values""" + operation_timeout = 8 + attempt_timeout = 4 + with mock.patch.object(self._get_operation_class(), "__init__") as mock_op: + mock_op.side_effect = RuntimeError("mock error") + with self._make_table( + default_operation_timeout=99, default_attempt_timeout=97 + ) as table: + try: + table.read_rows( + ReadRowsQuery(), + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + ) + except RuntimeError: + pass + kwargs = mock_op.call_args_list[0].kwargs + assert kwargs["operation_timeout"] == operation_timeout + assert kwargs["attempt_timeout"] == attempt_timeout + + def test_read_row(self): + """Test reading a single row""" + with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + expected_result = object() + read_rows.side_effect = lambda *args, **kwargs: [expected_result] + expected_op_timeout = 8 + expected_req_timeout = 4 + row = table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert row == expected_result + assert read_rows.call_count == 1 + (args, kwargs) = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert len(args) == 1 + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + + def test_read_row_w_filter(self): + """Test reading a single row with an added filter""" + with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + expected_result = object() + read_rows.side_effect = lambda *args, **kwargs: [expected_result] + expected_op_timeout = 8 + expected_req_timeout = 4 + mock_filter = mock.Mock() + expected_filter = {"filter": "mock filter"} + mock_filter._to_dict.return_value = expected_filter + row = table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + row_filter=expected_filter, + ) + assert row == expected_result + assert read_rows.call_count == 1 + (args, kwargs) = 
read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert len(args) == 1 + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + assert query.filter == expected_filter + + def test_read_row_no_response(self): + """should return None if row does not exist""" + with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = lambda *args, **kwargs: [] + expected_op_timeout = 8 + expected_req_timeout = 4 + result = table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert result is None + assert read_rows.call_count == 1 + (args, kwargs) = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + + @pytest.mark.parametrize( + "return_value,expected_result", + [([], False), ([object()], True), ([object(), object()], True)], + ) + def test_row_exists(self, return_value, expected_result): + """Test checking for row existence""" + with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = lambda *args, **kwargs: return_value + expected_op_timeout = 1 + expected_req_timeout = 2 + result = table.row_exists( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert expected_result == result + assert read_rows.call_count == 1 + (args, kwargs) = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert isinstance(args[0], ReadRowsQuery) + expected_filter = { + "chain": { + "filters": [ + {"cells_per_row_limit_filter": 1}, + {"strip_value_transformer": True}, + ] + } + } + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + assert query.filter._to_dict() == expected_filter + + +class TestReadRowsSharded: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def test_read_rows_sharded_empty_query(self): + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as exc: + table.read_rows_sharded([]) + assert "empty sharded_query" in str(exc.value) + + def test_read_rows_sharded_multiple_queries(self): + """Test with multiple queries. 
Should return results from both""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "read_rows" + ) as read_rows: + read_rows.side_effect = lambda *args, **kwargs: CrossSync._Sync_Impl.TestReadRows._make_gapic_stream( + [ + CrossSync._Sync_Impl.TestReadRows._make_chunk(row_key=k) + for k in args[0].rows.row_keys + ] + ) + query_1 = ReadRowsQuery(b"test_1") + query_2 = ReadRowsQuery(b"test_2") + result = table.read_rows_sharded([query_1, query_2]) + assert len(result) == 2 + assert result[0].row_key == b"test_1" + assert result[1].row_key == b"test_2" + + @pytest.mark.parametrize("n_queries", [1, 2, 5, 11, 24]) + def test_read_rows_sharded_multiple_queries_calls(self, n_queries): + """Each query should trigger a separate read_rows call""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + query_list = [ReadRowsQuery() for _ in range(n_queries)] + table.read_rows_sharded(query_list) + assert read_rows.call_count == n_queries + + def test_read_rows_sharded_errors(self): + """Errors should be exposed as ShardedReadRowsExceptionGroups""" + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedQueryShardError + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = RuntimeError("mock error") + query_1 = ReadRowsQuery(b"test_1") + query_2 = ReadRowsQuery(b"test_2") + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + table.read_rows_sharded([query_1, query_2]) + exc_group = exc.value + assert isinstance(exc_group, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == 2 + assert isinstance(exc.value.exceptions[0], FailedQueryShardError) + assert isinstance(exc.value.exceptions[0].__cause__, RuntimeError) + assert exc.value.exceptions[0].index == 0 + assert exc.value.exceptions[0].query == query_1 + assert isinstance(exc.value.exceptions[1], FailedQueryShardError) + assert isinstance(exc.value.exceptions[1].__cause__, RuntimeError) + assert exc.value.exceptions[1].index == 1 + assert exc.value.exceptions[1].query == query_2 + + def test_read_rows_sharded_concurrent(self): + """Ensure sharded requests are concurrent""" + import time + + def mock_call(*args, **kwargs): + CrossSync._Sync_Impl.sleep(0.1) + return [mock.Mock()] + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(10)] + start_time = time.monotonic() + result = table.read_rows_sharded(queries) + call_time = time.monotonic() - start_time + assert read_rows.call_count == 10 + assert len(result) == 10 + assert call_time < 0.5 + + def test_read_rows_sharded_concurrency_limit(self): + """Only 10 queries should be processed concurrently. 
Others should be queued + + A new query should start as soon as a previous one finishes""" + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + + assert _CONCURRENCY_LIMIT == 10 + num_queries = 15 + increment_time = 0.05 + max_time = increment_time * (_CONCURRENCY_LIMIT - 1) + rpc_times = [min(i * increment_time, max_time) for i in range(num_queries)] + + def mock_call(*args, **kwargs): + next_sleep = rpc_times.pop(0) + CrossSync._Sync_Impl.sleep(next_sleep) + return [mock.Mock()] + + starting_timeout = 10 + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(num_queries)] + table.read_rows_sharded(queries, operation_timeout=starting_timeout) + assert read_rows.call_count == num_queries + rpc_start_list = [ + starting_timeout - kwargs["operation_timeout"] + for (_, kwargs) in read_rows.call_args_list + ] + eps = 0.01 + assert all( + (rpc_start_list[i] < eps for i in range(_CONCURRENCY_LIMIT)) + ) + for i in range(num_queries - _CONCURRENCY_LIMIT): + idx = i + _CONCURRENCY_LIMIT + assert rpc_start_list[idx] - i * increment_time < eps + + def test_read_rows_sharded_expirary(self): + """If the operation times out before all shards complete, should raise + a ShardedReadRowsExceptionGroup""" + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.api_core.exceptions import DeadlineExceeded + + operation_timeout = 0.1 + num_queries = 15 + sleeps = [0] * _CONCURRENCY_LIMIT + [DeadlineExceeded("times up")] * ( + num_queries - _CONCURRENCY_LIMIT + ) + + def mock_call(*args, **kwargs): + next_item = sleeps.pop(0) + if isinstance(next_item, Exception): + raise next_item + else: + CrossSync._Sync_Impl.sleep(next_item) + return [mock.Mock()] + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(num_queries)] + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + table.read_rows_sharded( + queries, operation_timeout=operation_timeout + ) + assert isinstance(exc.value, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == num_queries - _CONCURRENCY_LIMIT + assert len(exc.value.successful_rows) == _CONCURRENCY_LIMIT + + def test_read_rows_sharded_negative_batch_timeout(self): + """Try to run batches that start after the operation timeout has already expired + + They should raise DeadlineExceeded errors""" + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + from google.api_core.exceptions import DeadlineExceeded + + def mock_call(*args, **kwargs): + CrossSync._Sync_Impl.sleep(0.06) + return [mock.Mock()] + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + num_calls = 15 + queries = [ReadRowsQuery() for _ in range(num_calls)] + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + table.read_rows_sharded(queries, operation_timeout=0.05) + assert isinstance(exc.value, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) >= num_calls - _CONCURRENCY_LIMIT + assert all( + ( + isinstance(e.__cause__,
DeadlineExceeded) + for e in exc.value.exceptions + ) + ) + + +class TestSampleRowKeys: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def _make_gapic_stream(self, sample_list: list[tuple[bytes, int]]): + from google.cloud.bigtable_v2.types import SampleRowKeysResponse + + for value in sample_list: + yield SampleRowKeysResponse(row_key=value[0], offset_bytes=value[1]) + + def test_sample_row_keys(self): + """Test that method returns the expected key samples""" + samples = [(b"test_1", 0), (b"test_2", 100), (b"test_3", 200)] + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream(samples) + result = table.sample_row_keys() + assert len(result) == 3 + assert all((isinstance(r, tuple) for r in result)) + assert all((isinstance(r[0], bytes) for r in result)) + assert all((isinstance(r[1], int) for r in result)) + assert result[0] == samples[0] + assert result[1] == samples[1] + assert result[2] == samples[2] + + def test_sample_row_keys_bad_timeout(self): + """should raise error if timeout is negative""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + table.sample_row_keys(operation_timeout=-1) + assert "operation_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + table.sample_row_keys(attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + + def test_sample_row_keys_default_timeout(self): + """Should fallback to using table default operation_timeout""" + expected_timeout = 99 + with self._make_client() as client: + with client.get_table( + "i", + "t", + default_operation_timeout=expected_timeout, + default_attempt_timeout=expected_timeout, + ) as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream([]) + result = table.sample_row_keys() + (_, kwargs) = sample_row_keys.call_args + assert abs(kwargs["timeout"] - expected_timeout) < 0.1 + assert result == [] + assert kwargs["retry"] is None + + def test_sample_row_keys_gapic_params(self): + """make sure arguments are propagated to gapic call as expected""" + expected_timeout = 10 + expected_profile = "test1" + instance = "instance_name" + table_id = "my_table" + with self._make_client() as client: + with client.get_table( + instance, table_id, app_profile_id=expected_profile + ) as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream([]) + table.sample_row_keys(attempt_timeout=expected_timeout) + (args, kwargs) = sample_row_keys.call_args + assert len(args) == 0 + assert len(kwargs) == 3 + assert kwargs["timeout"] == expected_timeout + assert kwargs["retry"] is None + request = kwargs["request"] + assert request.app_profile_id == expected_profile + assert request.table_name == table.table_name + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_sample_row_keys_retryable_errors(self, retryable_exception): + """retryable 
errors should be retried until timeout""" + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.side_effect = retryable_exception("mock") + with pytest.raises(DeadlineExceeded) as e: + table.sample_row_keys(operation_timeout=0.05) + cause = e.value.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert len(cause.exceptions) > 0 + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + core_exceptions.Aborted, + ], + ) + def test_sample_row_keys_non_retryable_errors(self, non_retryable_exception): + """non-retryable errors should cause a raise""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.side_effect = non_retryable_exception("mock") + with pytest.raises(non_retryable_exception): + table.sample_row_keys() + + +class TestMutateRow: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.mark.parametrize( + "mutation_arg", + [ + mutations.SetCell("family", b"qualifier", b"value"), + mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=1234567890 + ), + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromFamily("family"), + mutations.DeleteAllFromRow(), + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromRow(), + ], + ], + ) + def test_mutate_row(self, mutation_arg): + """Test mutations with no errors""" + expected_attempt_timeout = 19 + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.return_value = None + table.mutate_row( + "row_key", + mutation_arg, + attempt_timeout=expected_attempt_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0].kwargs + request = kwargs["request"] + assert ( + request.table_name + == "projects/project/instances/instance/tables/table" + ) + assert request.row_key == b"row_key" + formatted_mutations = ( + [mutation._to_pb() for mutation in mutation_arg] + if isinstance(mutation_arg, list) + else [mutation_arg._to_pb()] + ) + assert request.mutations == formatted_mutations + assert kwargs["timeout"] == expected_attempt_timeout + assert kwargs["retry"] is None + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_mutate_row_retryable_errors(self, retryable_exception): + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, 
"mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(DeadlineExceeded) as e: + mutation = mutations.DeleteAllFromRow() + assert mutation.is_idempotent() is True + table.mutate_row("row_key", mutation, operation_timeout=0.01) + cause = e.value.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_mutate_row_non_idempotent_retryable_errors(self, retryable_exception): + """Non-idempotent mutations should not be retried""" + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(retryable_exception): + mutation = mutations.SetCell( + "family", b"qualifier", b"value", -1 + ) + assert mutation.is_idempotent() is False + table.mutate_row("row_key", mutation, operation_timeout=0.2) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + core_exceptions.Aborted, + ], + ) + def test_mutate_row_non_retryable_errors(self, non_retryable_exception): + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = non_retryable_exception("mock") + with pytest.raises(non_retryable_exception): + mutation = mutations.SetCell( + "family", + b"qualifier", + b"value", + timestamp_micros=1234567890, + ) + assert mutation.is_idempotent() is True + table.mutate_row("row_key", mutation, operation_timeout=0.2) + + @pytest.mark.parametrize("mutations", [[], None]) + def test_mutate_row_no_mutations(self, mutations): + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + table.mutate_row("key", mutations=mutations) + assert e.value.args[0] == "No mutations provided" + + +class TestBulkMutateRows: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def _mock_response(self, response_list): + from google.cloud.bigtable_v2.types import MutateRowsResponse + from google.rpc import status_pb2 + + statuses = [] + for response in response_list: + if isinstance(response, core_exceptions.GoogleAPICallError): + statuses.append( + status_pb2.Status( + message=str(response), code=response.grpc_status_code.value[0] + ) + ) + else: + statuses.append(status_pb2.Status(code=0)) + entries = [ + MutateRowsResponse.Entry(index=i, status=statuses[i]) + for i in range(len(response_list)) + ] + + def generator(): + yield MutateRowsResponse(entries=entries) + + return generator() + + @pytest.mark.parametrize( + "mutation_arg", + [ + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=1234567890 + ) + ], + [mutations.DeleteRangeFromColumn("family", b"qualifier")], + [mutations.DeleteAllFromFamily("family")], + [mutations.DeleteAllFromRow()], + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.DeleteRangeFromColumn("family", b"qualifier"), 
+ mutations.DeleteAllFromRow(), + ], + ], + ) + def test_bulk_mutate_rows(self, mutation_arg): + """Test mutations with no errors""" + expected_attempt_timeout = 19 + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.return_value = self._mock_response([None]) + bulk_mutation = mutations.RowMutationEntry(b"row_key", mutation_arg) + table.bulk_mutate_rows( + [bulk_mutation], attempt_timeout=expected_attempt_timeout + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert ( + request.table_name + == "projects/project/instances/instance/tables/table" + ) + assert request.entries == [bulk_mutation._to_pb()] + assert kwargs["timeout"] == expected_attempt_timeout + assert kwargs["retry"] is None + + def test_bulk_mutate_rows_multiple_entries(self): + """Test mutations with no errors""" + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.return_value = self._mock_response([None, None]) + mutation_list = [mutations.DeleteAllFromRow()] + entry_1 = mutations.RowMutationEntry(b"row_key_1", mutation_list) + entry_2 = mutations.RowMutationEntry(b"row_key_2", mutation_list) + table.bulk_mutate_rows([entry_1, entry_2]) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert ( + request.table_name + == "projects/project/instances/instance/tables/table" + ) + assert request.entries[0] == entry_1._to_pb() + assert request.entries[1] == entry_2._to_pb() + + @pytest.mark.parametrize( + "exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_bulk_mutate_rows_idempotent_mutation_error_retryable(self, exception): + """Individual idempotent mutations should be retried if they fail with a retryable error""" + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.DeleteAllFromRow() + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert "non-idempotent" not in str(failed_exception) + assert isinstance(failed_exception, FailedMutationEntryError) + cause = failed_exception.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], exception) + assert isinstance( + cause.exceptions[-1], core_exceptions.DeadlineExceeded + ) + + @pytest.mark.parametrize( + "exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + core_exceptions.Aborted, + ], + ) + def test_bulk_mutate_rows_idempotent_mutation_error_non_retryable(self, exception): + """Individual idempotent mutations should not be retried if they fail with a non-retryable error""" + from 
google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.DeleteAllFromRow() + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert "non-idempotent" not in str(failed_exception) + assert isinstance(failed_exception, FailedMutationEntryError) + cause = failed_exception.__cause__ + assert isinstance(cause, exception) + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_bulk_mutate_idempotent_retryable_request_errors(self, retryable_exception): + """Individual idempotent mutations should be retried if the request fails with a retryable error""" + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" not in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_bulk_mutate_rows_non_idempotent_retryable_errors( + self, retryable_exception + ): + """Non-Idempotent mutations should never be retried""" + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [retryable_exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", -1 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is False + table.bulk_mutate_rows([entry], operation_timeout=0.2) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, retryable_exception) + + 
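+ # Note on the idempotency rule used above and below: SetCell with
+ # timestamp_micros=-1 delegates timestamp assignment to the server, so
+ # replaying it could write a duplicate cell. A rough sketch of the behavior
+ # these tests rely on:
+ #   mutations.SetCell("family", b"q", b"v", -1).is_idempotent()   # False -> never retried
+ #   mutations.SetCell("family", b"q", b"v", timestamp_micros=123).is_idempotent()  # True -> may be retried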
@pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + ], + ) + def test_bulk_mutate_rows_non_retryable_errors(self, non_retryable_exception): + """If the request fails with a non-retryable error, mutations should not be retried""" + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = non_retryable_exception("mock") + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + table.bulk_mutate_rows([entry], operation_timeout=0.2) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" not in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, non_retryable_exception) + + def test_bulk_mutate_error_index(self): + """Test partial failure, partial success. Errors should be associated with the correct index""" + from google.api_core.exceptions import ( + DeadlineExceeded, + ServiceUnavailable, + FailedPrecondition, + ) + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = [ + self._mock_response([None, ServiceUnavailable("mock"), None]), + self._mock_response([DeadlineExceeded("mock")]), + self._mock_response([FailedPrecondition("final")]), + ] + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entries = [ + mutations.RowMutationEntry( + f"row_key_{i}".encode(), [mutation] + ) + for i in range(3) + ] + assert mutation.is_idempotent() is True + table.bulk_mutate_rows(entries, operation_timeout=1000) + assert len(e.value.exceptions) == 1 + failed = e.value.exceptions[0] + assert isinstance(failed, FailedMutationEntryError) + assert failed.index == 1 + assert failed.entry == entries[1] + cause = failed.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert len(cause.exceptions) == 3 + assert isinstance(cause.exceptions[0], ServiceUnavailable) + assert isinstance(cause.exceptions[1], DeadlineExceeded) + assert isinstance(cause.exceptions[2], FailedPrecondition) + + def test_bulk_mutate_error_recovery(self): + """If an error occurs, then resolves, no exception should be raised""" + from google.api_core.exceptions import DeadlineExceeded + + with self._make_client(project="project") as client: + table = client.get_table("instance", "table") + with mock.patch.object(client._gapic_client, "mutate_rows") as mock_gapic: + mock_gapic.side_effect = [ + self._mock_response([DeadlineExceeded("mock")]), + self._mock_response([None]), + ] + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entries = [ + 
mutations.RowMutationEntry(f"row_key_{i}".encode(), [mutation]) + for i in range(3) + ] + table.bulk_mutate_rows(entries, operation_timeout=1000) + + +class TestCheckAndMutateRow: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.mark.parametrize("gapic_result", [True, False]) + def test_check_and_mutate(self, gapic_result): + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + app_profile = "app_profile_id" + with self._make_client() as client: + with client.get_table( + "instance", "table", app_profile_id=app_profile + ) as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=gapic_result + ) + row_key = b"row_key" + predicate = None + true_mutations = [DeleteAllFromRow()] + false_mutations = [DeleteAllFromRow(), DeleteAllFromRow()] + operation_timeout = 0.2 + found = table.check_and_mutate_row( + row_key, + predicate, + true_case_mutations=true_mutations, + false_case_mutations=false_mutations, + operation_timeout=operation_timeout, + ) + assert found == gapic_result + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert request.table_name == table.table_name + assert request.row_key == row_key + assert bool(request.predicate_filter) is False + assert request.true_mutations == [ + m._to_pb() for m in true_mutations + ] + assert request.false_mutations == [ + m._to_pb() for m in false_mutations + ] + assert request.app_profile_id == app_profile + assert kwargs["timeout"] == operation_timeout + assert kwargs["retry"] is None + + def test_check_and_mutate_bad_timeout(self): + """Should raise error if operation_timeout < 0""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=[mock.Mock()], + false_case_mutations=[], + operation_timeout=-1, + ) + assert str(e.value) == "operation_timeout must be greater than 0" + + def test_check_and_mutate_single_mutations(self): + """if single mutations are passed, they should be internally wrapped in a list""" + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + true_mutation = SetCell("family", b"qualifier", b"value") + false_mutation = SetCell("family", b"qualifier", b"value") + table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=true_mutation, + false_case_mutations=false_mutation, + ) + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert request.true_mutations == [true_mutation._to_pb()] + assert request.false_mutations == [false_mutation._to_pb()] + + def test_check_and_mutate_predicate_object(self): + """predicate filter should be passed to gapic request""" + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + from google.cloud.bigtable_v2.types.data import RowFilter + + mock_predicate = mock.Mock() + predicate_pb = RowFilter({"sink": True}) + mock_predicate._to_pb.return_value = predicate_pb + with self._make_client() as client: + with 
client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + table.check_and_mutate_row( + b"row_key", + mock_predicate, + false_case_mutations=[DeleteAllFromRow()], + ) + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert request.predicate_filter == predicate_pb + assert mock_predicate._to_pb.call_count == 1 + assert kwargs["retry"] is None + + def test_check_and_mutate_mutations_parsing(self): + """mutations objects should be converted to protos""" + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + from google.cloud.bigtable.data.mutations import DeleteAllFromFamily + + mutations = [mock.Mock() for _ in range(5)] + for idx, mutation in enumerate(mutations): + mutation._to_pb.return_value = DeleteAllFromFamily(f"fake {idx}")._to_pb() + mutations.append(DeleteAllFromRow()) + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=mutations[0:2], + false_case_mutations=mutations[2:], + ) + kwargs = mock_gapic.call_args[1] + request = kwargs["request"] + assert request.true_mutations == [ + DeleteAllFromFamily("fake 0")._to_pb(), + DeleteAllFromFamily("fake 1")._to_pb(), + ] + assert request.false_mutations == [ + DeleteAllFromFamily("fake 2")._to_pb(), + DeleteAllFromFamily("fake 3")._to_pb(), + DeleteAllFromFamily("fake 4")._to_pb(), + DeleteAllFromRow()._to_pb(), + ] + assert all( + (mutation._to_pb.call_count == 1 for mutation in mutations[:5]) + ) + + +class TestReadModifyWriteRow: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.mark.parametrize( + "call_rules,expected_rules", + [ + ( + AppendValueRule("f", "c", b"1"), + [AppendValueRule("f", "c", b"1")._to_pb()], + ), + ( + [AppendValueRule("f", "c", b"1")], + [AppendValueRule("f", "c", b"1")._to_pb()], + ), + (IncrementRule("f", "c", 1), [IncrementRule("f", "c", 1)._to_pb()]), + ( + [AppendValueRule("f", "c", b"1"), IncrementRule("f", "c", 1)], + [ + AppendValueRule("f", "c", b"1")._to_pb(), + IncrementRule("f", "c", 1)._to_pb(), + ], + ), + ], + ) + def test_read_modify_write_call_rule_args(self, call_rules, expected_rules): + """Test that the gapic call is called with given rules""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + table.read_modify_write_row("key", call_rules) + assert mock_gapic.call_count == 1 + found_kwargs = mock_gapic.call_args_list[0][1] + request = found_kwargs["request"] + assert request.rules == expected_rules + assert found_kwargs["retry"] is None + + @pytest.mark.parametrize("rules", [[], None]) + def test_read_modify_write_no_rules(self, rules): + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + table.read_modify_write_row("key", rules=rules) + assert e.value.args[0] == "rules must contain at least one item" + + def test_read_modify_write_call_defaults(self): + instance = "instance1" + table_id = "table1" + project 
= "project1" + row_key = "row_key1" + with self._make_client(project=project) as client: + with client.get_table(instance, table_id) as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + table.read_modify_write_row(row_key, IncrementRule("f", "q")) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + request = kwargs["request"] + assert ( + request.table_name + == f"projects/{project}/instances/{instance}/tables/{table_id}" + ) + assert bool(request.app_profile_id) is False + assert request.row_key == row_key.encode() + assert kwargs["timeout"] > 1 + + def test_read_modify_write_call_overrides(self): + row_key = b"row_key1" + expected_timeout = 12345 + profile_id = "profile1" + with self._make_client() as client: + with client.get_table( + "instance", "table_id", app_profile_id=profile_id + ) as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + table.read_modify_write_row( + row_key, + IncrementRule("f", "q"), + operation_timeout=expected_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + request = kwargs["request"] + assert request.app_profile_id == profile_id + assert request.row_key == row_key + assert kwargs["timeout"] == expected_timeout + + def test_read_modify_write_string_key(self): + row_key = "string_row_key1" + with self._make_client() as client: + with client.get_table("instance", "table_id") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + table.read_modify_write_row(row_key, IncrementRule("f", "q")) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + request = kwargs["request"] + assert request.row_key == row_key.encode() + + def test_read_modify_write_row_building(self): + """results from gapic call should be used to construct row""" + from google.cloud.bigtable.data.row import Row + from google.cloud.bigtable_v2.types import ReadModifyWriteRowResponse + from google.cloud.bigtable_v2.types import Row as RowPB + + mock_response = ReadModifyWriteRowResponse(row=RowPB()) + with self._make_client() as client: + with client.get_table("instance", "table_id") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + with mock.patch.object(Row, "_from_pb") as constructor_mock: + mock_gapic.return_value = mock_response + table.read_modify_write_row("key", IncrementRule("f", "q")) + assert constructor_mock.call_count == 1 + constructor_mock.assert_called_once_with(mock_response.row) + + +class TestExecuteQuery: + TABLE_NAME = "TABLE_NAME" + INSTANCE_NAME = "INSTANCE_NAME" + + @pytest.fixture(scope="function") + def client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.fixture(scope="function") + def execute_query_mock(self, client): + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + yield execute_query_mock + + @pytest.fixture(scope="function") + def prepare_mock(self, client): + with mock.patch.object( + client._gapic_client, "prepare_query", CrossSync._Sync_Impl.Mock() + ) as prepare_mock: + prepare_mock.return_value = prepare_response( + prepared_query=b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ) + yield prepare_mock + + def _make_gapic_stream(self, sample_list: list["ExecuteQueryResponse" | 
Exception]): + class MockStream: + def __init__(self, sample_list): + self.sample_list = sample_list + + def __aiter__(self): + return self + + def __iter__(self): + return self + + def __next__(self): + if not self.sample_list: + raise CrossSync._Sync_Impl.StopIteration + value = self.sample_list.pop(0) + if isinstance(value, Exception): + raise value + return value + + def __anext__(self): + return self.__next__() + + return MockStream(sample_list) + + def test_execute_query(self, client, execute_query_mock, prepare_mock): + values = [ + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + + def test_execute_query_with_params(self, client, execute_query_mock, prepare_mock): + values = [*chunked_responses(2, str_val("test2"), int_val(9), token=b"r2")] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME} WHERE b=@b", + self.INSTANCE_NAME, + parameters={"b": 9}, + ) + results = [r for r in result] + assert len(results) == 1 + assert results[0]["a"] == "test2" + assert results[0]["b"] == 9 + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + + def test_execute_query_error_before_metadata( + self, client, execute_query_mock, prepare_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + DeadlineExceeded(""), + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + assert prepare_mock.call_count == 1 + + def test_execute_query_error_after_metadata( + self, client, execute_query_mock, prepare_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + DeadlineExceeded(""), + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + def test_execute_query_with_retries(self, client, execute_query_mock, prepare_mock): + from google.api_core.exceptions import DeadlineExceeded + + 
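+ # The stream below breaks twice with DeadlineExceeded; each retry is expected
+ # to re-issue execute_query with the most recent resume_token (b"r1", then
+ # b"r2"), which the resume_tokens assertion at the end verifies.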
values = [ + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + DeadlineExceeded(""), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + DeadlineExceeded(""), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(results) == 3 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"r1", b"r2"] + assert prepare_mock.call_count == 1 + + @pytest.mark.parametrize( + "exception", + [ + core_exceptions.DeadlineExceeded(""), + core_exceptions.Aborted(""), + core_exceptions.ServiceUnavailable(""), + ], + ) + def test_execute_query_retryable_error( + self, client, execute_query_mock, prepare_mock, exception + ): + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1"), + exception, + *chunked_responses(1, str_val("test2"), int_val(9), reset=True, token=b"t1"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 2 + assert execute_query_mock.call_count == 2 + assert prepare_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"t1"] + + @pytest.mark.parametrize( + "ExceptionType", + [ + core_exceptions.InvalidArgument, + core_exceptions.FailedPrecondition, + core_exceptions.PermissionDenied, + core_exceptions.MethodNotImplemented, + core_exceptions.Cancelled, + core_exceptions.AlreadyExists, + core_exceptions.OutOfRange, + core_exceptions.DataLoss, + core_exceptions.Unauthenticated, + core_exceptions.NotFound, + core_exceptions.ResourceExhausted, + core_exceptions.Unknown, + core_exceptions.InternalServerError, + ], + ) + def test_execute_query_non_retryable( + self, client, execute_query_mock, prepare_mock, ExceptionType + ): + values = [ + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + ExceptionType(""), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + r = CrossSync._Sync_Impl.next(result) + assert r["a"] == "test" + assert r["b"] == 8 + with pytest.raises(ExceptionType): + r = CrossSync._Sync_Impl.next(result) + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def
test_prepare_query_retryable( + self, client, execute_query_mock, prepare_mock, retryable_exception + ): + prepare_mock.reset_mock() + prepare_mock.side_effect = [ + retryable_exception("test"), + prepare_response( + b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ), + ] + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1") + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 2 + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.InvalidArgument, + core_exceptions.FailedPrecondition, + core_exceptions.PermissionDenied, + core_exceptions.MethodNotImplemented, + core_exceptions.Cancelled, + core_exceptions.AlreadyExists, + core_exceptions.OutOfRange, + core_exceptions.DataLoss, + core_exceptions.Unauthenticated, + core_exceptions.NotFound, + core_exceptions.ResourceExhausted, + core_exceptions.Unknown, + core_exceptions.InternalServerError, + ], + ) + def test_prepare_query_non_retryable( + self, client, execute_query_mock, prepare_mock, non_retryable_exception + ): + prepare_mock.reset_mock() + prepare_mock.side_effect = [ + non_retryable_exception("test"), + prepare_response( + b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ), + ] + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1") + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + with pytest.raises(non_retryable_exception): + client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) diff --git a/tests/unit/data/_sync_autogen/test_metrics_interceptor.py b/tests/unit/data/_sync_autogen/test_metrics_interceptor.py new file mode 100644 index 000000000..c4efcc5b9 --- /dev/null +++ b/tests/unit/data/_sync_autogen/test_metrics_interceptor.py @@ -0,0 +1,307 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
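+# The cases below cover the sync metrics interceptor: unary-unary and
+# unary-stream interception, response-metadata capture on success and failure,
+# and starting attempts for newly created operations.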
+ +import pytest +from grpc import RpcError +from grpc import ClientCallDetails +from google.cloud.bigtable.data._metrics.data_model import ActiveOperationMetric +from google.cloud.bigtable.data._metrics.data_model import OperationState +from google.cloud.bigtable.data._cross_sync import CrossSync + +try: + from unittest import mock +except ImportError: + import mock +from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( + BigtableMetricsInterceptor, +) + + +def _make_mock_stream_call(values, exc=None): + """Create a mock call object that can be used for streaming calls""" + call = CrossSync._Sync_Impl.Mock() + + def gen(): + for val in values: + yield val + if exc: + raise exc + + call.__iter__ = mock.Mock(return_value=gen()) + return call + + +class TestMetricsInterceptor: + @staticmethod + def _get_target_class(): + return BigtableMetricsInterceptor + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_unary_unary_interceptor_op_not_found(self): + """Test that interceptor calls continuation if op is not found""" + instance = self._make_one() + continuation = CrossSync._Sync_Impl.Mock() + details = ClientCallDetails() + details.metadata = [] + request = mock.Mock() + instance.intercept_unary_unary(continuation, details, request) + continuation.assert_called_once_with(details, request) + + def test_unary_unary_interceptor_success(self): + """Test that interceptor handles successful unary-unary calls""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + ActiveOperationMetric._active_operation_context.set(op) + continuation = CrossSync._Sync_Impl.Mock() + call = continuation.return_value + call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")]) + call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")]) + details = ClientCallDetails() + request = mock.Mock() + result = instance.intercept_unary_unary(continuation, details, request) + assert result == call + continuation.assert_called_once_with(details, request) + op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"}) + op.end_attempt_with_status.assert_not_called() + + def test_unary_unary_interceptor_failure(self): + """test a failed RpcError with metadata""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + ActiveOperationMetric._active_operation_context.set(op) + exc = RpcError("test") + exc.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")]) + exc.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")]) + continuation = CrossSync._Sync_Impl.Mock(side_effect=exc) + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(RpcError) as e: + instance.intercept_unary_unary(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"}) + + def test_unary_unary_interceptor_failure_no_metadata(self): + """test with RpcError without metadata attached""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + ActiveOperationMetric._active_operation_context.set(op) + exc = RpcError("test") + continuation = CrossSync._Sync_Impl.Mock(side_effect=exc) + call = continuation.return_value + call.trailing_metadata =
CrossSync._Sync_Impl.Mock(return_value=[("a", "b")]) + call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")]) + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(RpcError) as e: + instance.intercept_unary_unary(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + op.add_response_metadata.assert_not_called() + + def test_unary_unary_interceptor_failure_generic(self): + """test generic exception""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + ActiveOperationMetric._active_operation_context.set(op) + exc = ValueError("test") + continuation = CrossSync._Sync_Impl.Mock(side_effect=exc) + call = continuation.return_value + call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")]) + call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")]) + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(ValueError) as e: + instance.intercept_unary_unary(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + op.add_response_metadata.assert_not_called() + + def test_unary_stream_interceptor_op_not_found(self): + """Test that interceptor calls continuation if op is not found""" + instance = self._make_one() + continuation = CrossSync._Sync_Impl.Mock() + details = ClientCallDetails() + details.metadata = [] + request = mock.Mock() + instance.intercept_unary_stream(continuation, details, request) + continuation.assert_called_once_with(details, request) + + def test_unary_stream_interceptor_success(self): + """Test that interceptor handles successful unary-stream calls""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + continuation = CrossSync._Sync_Impl.Mock( + return_value=_make_mock_stream_call([1, 2]) + ) + call = continuation.return_value + call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")]) + call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")]) + details = ClientCallDetails() + request = mock.Mock() + wrapper = instance.intercept_unary_stream(continuation, details, request) + results = [val for val in wrapper] + assert results == [1, 2] + continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"}) + op.end_attempt_with_status.assert_not_called() + + def test_unary_stream_interceptor_failure_mid_stream(self): + """Test that interceptor handles failures mid-stream""" + from grpc.aio import AioRpcError, Metadata + + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + exc = AioRpcError(0, Metadata(), Metadata(("a", "b"), ("c", "d"))) + continuation = CrossSync._Sync_Impl.Mock( + return_value=_make_mock_stream_call([1], exc=exc) + ) + details = ClientCallDetails() + request = mock.Mock() + wrapper = instance.intercept_unary_stream(continuation, details, request) + with pytest.raises(AioRpcError) as e: + [val for val in wrapper] + assert e.value == exc + 
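+ # Even though the stream failed mid-iteration, the wrapper should already
+ # have recorded first-response latency and harvested metadata from the
+ # raised error: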
continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"}) + + def test_unary_stream_interceptor_failure_start_stream(self): + """Test that interceptor handles failures at start of stream with RpcError with metadata""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + exc = RpcError("test") + exc.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[("a", "b")]) + exc.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[("c", "d")]) + continuation = CrossSync._Sync_Impl.Mock() + continuation.side_effect = exc + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(RpcError) as e: + instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_called_once_with({"a": "b", "c": "d"}) + + def test_unary_stream_interceptor_failure_start_stream_no_metadata(self): + """Test that interceptor handles failures at start of stream with RpcError with no metadata""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + exc = RpcError("test") + continuation = CrossSync._Sync_Impl.Mock() + continuation.side_effect = exc + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(RpcError) as e: + instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_not_called() + + def test_unary_stream_interceptor_failure_start_stream_generic(self): + """Test that interceptor handles failures at start of stream with generic exception""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = OperationState.ACTIVE_ATTEMPT + op.start_time_ns = 0 + op.first_response_latency = None + ActiveOperationMetric._active_operation_context.set(op) + exc = ValueError("test") + continuation = CrossSync._Sync_Impl.Mock() + continuation.side_effect = exc + details = ClientCallDetails() + request = mock.Mock() + with pytest.raises(ValueError) as e: + instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + assert op.first_response_latency_ns is not None + op.add_response_metadata.assert_not_called() + + @pytest.mark.parametrize( + "initial_state", [OperationState.CREATED, OperationState.BETWEEN_ATTEMPTS] + ) + def test_unary_unary_interceptor_start_operation(self, initial_state): + """if called with a newly created operation, it should be started""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = initial_state + ActiveOperationMetric._active_operation_context.set(op) + continuation = CrossSync._Sync_Impl.Mock() + call = continuation.return_value + call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[]) + call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[]) + details = ClientCallDetails() + request = 
mock.Mock() + instance.intercept_unary_unary(continuation, details, request) + op.start_attempt.assert_called_once() + + @pytest.mark.parametrize( + "initial_state", [OperationState.CREATED, OperationState.BETWEEN_ATTEMPTS] + ) + def test_unary_stream_interceptor_start_operation(self, initial_state): + """if called with a newly created operation, it should be started""" + instance = self._make_one() + op = mock.Mock() + op.uuid = "test-uuid" + op.state = initial_state + ActiveOperationMetric._active_operation_context.set(op) + continuation = CrossSync._Sync_Impl.Mock( + return_value=_make_mock_stream_call([1, 2]) + ) + call = continuation.return_value + call.trailing_metadata = CrossSync._Sync_Impl.Mock(return_value=[]) + call.initial_metadata = CrossSync._Sync_Impl.Mock(return_value=[]) + details = ClientCallDetails() + request = mock.Mock() + instance.intercept_unary_stream(continuation, details, request) + op.start_attempt.assert_called_once() diff --git a/tests/unit/data/_sync_autogen/test_mutations_batcher.py b/tests/unit/data/_sync_autogen/test_mutations_batcher.py new file mode 100644 index 000000000..92d16b349 --- /dev/null +++ b/tests/unit/data/_sync_autogen/test_mutations_batcher.py @@ -0,0 +1,1083 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
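+# The tests below exercise the sync mutations batcher, beginning with the
+# _FlowControl helper that caps how many mutations and how many bytes may be
+# in flight at once.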
+ +import pytest +import mock +import asyncio +import time +import google.api_core.exceptions as core_exceptions +import google.api_core.retry +from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import DeleteAllFromRow +from google.cloud.bigtable.data import TABLE_DEFAULT +from google.cloud.bigtable.data._cross_sync import CrossSync + + +class Test_FlowControl: + @staticmethod + def _target_class(): + return CrossSync._Sync_Impl._FlowControl + + def _make_one(self, max_mutation_count=10, max_mutation_bytes=100): + return self._target_class()(max_mutation_count, max_mutation_bytes) + + @staticmethod + def _make_mutation(count=1, size=1): + mutation = RowMutationEntry("k", DeleteAllFromRow()) + mutation.mutations = [DeleteAllFromRow() for _ in range(count)] + mutation.size = lambda: size + return mutation + + def test_ctor(self): + max_mutation_count = 9 + max_mutation_bytes = 19 + instance = self._make_one(max_mutation_count, max_mutation_bytes) + assert instance._max_mutation_count == max_mutation_count + assert instance._max_mutation_bytes == max_mutation_bytes + assert instance._in_flight_mutation_count == 0 + assert instance._in_flight_mutation_bytes == 0 + assert isinstance(instance._capacity_condition, CrossSync._Sync_Impl.Condition) + + def test_ctor_invalid_values(self): + """Test that values are positive, and fit within expected limits""" + with pytest.raises(ValueError) as e: + self._make_one(0, 1) + assert "max_mutation_count must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._make_one(1, 0) + assert "max_mutation_bytes must be greater than 0" in str(e.value) + + @pytest.mark.parametrize( + "max_count,max_size,existing_count,existing_size,new_count,new_size,expected", + [ + (1, 1, 0, 0, 0, 0, True), + (1, 1, 1, 1, 1, 1, False), + (10, 10, 0, 0, 0, 0, True), + (10, 10, 0, 0, 9, 9, True), + (10, 10, 0, 0, 11, 9, True), + (10, 10, 0, 1, 11, 9, True), + (10, 10, 1, 0, 11, 9, False), + (10, 10, 0, 0, 9, 11, True), + (10, 10, 1, 0, 9, 11, True), + (10, 10, 0, 1, 9, 11, False), + (10, 1, 0, 0, 1, 0, True), + (1, 10, 0, 0, 0, 8, True), + (float("inf"), float("inf"), 0, 0, 10000000000.0, 10000000000.0, True), + (8, 8, 0, 0, 10000000000.0, 10000000000.0, True), + (12, 12, 6, 6, 5, 5, True), + (12, 12, 5, 5, 6, 6, True), + (12, 12, 6, 6, 6, 6, True), + (12, 12, 6, 6, 7, 7, False), + (12, 12, 0, 0, 13, 13, True), + (12, 12, 12, 0, 0, 13, True), + (12, 12, 0, 12, 13, 0, True), + (12, 12, 1, 1, 13, 13, False), + (12, 12, 1, 1, 0, 13, False), + (12, 12, 1, 1, 13, 0, False), + ], + ) + def test__has_capacity( + self, + max_count, + max_size, + existing_count, + existing_size, + new_count, + new_size, + expected, + ): + """_has_capacity should return True if the new mutation will not exceed the max count or size""" + instance = self._make_one(max_count, max_size) + instance._in_flight_mutation_count = existing_count + instance._in_flight_mutation_bytes = existing_size + assert instance._has_capacity(new_count, new_size) == expected + + @pytest.mark.parametrize( + "existing_count,existing_size,added_count,added_size,new_count,new_size", + [ + (0, 0, 0, 0, 0, 0), + (2, 2, 1, 1, 1, 1), + (2, 0, 1, 0, 1, 0), + (0, 2, 0, 1, 0, 1), + (10, 10, 0, 0, 10, 10), + (10, 10, 5, 5, 5, 5), + (0, 0, 1, 1, -1, -1), + ], + ) + def test_remove_from_flow_value_update( + self, + existing_count, + existing_size, + added_count, + added_size, + 
new_count, + new_size, + ): + """completed mutations should lower the inflight values""" + instance = self._make_one() + instance._in_flight_mutation_count = existing_count + instance._in_flight_mutation_bytes = existing_size + mutation = self._make_mutation(added_count, added_size) + instance.remove_from_flow(mutation) + assert instance._in_flight_mutation_count == new_count + assert instance._in_flight_mutation_bytes == new_size + + def test__remove_from_flow_unlock(self): + """capacity condition should notify after mutation is complete""" + instance = self._make_one(10, 10) + instance._in_flight_mutation_count = 10 + instance._in_flight_mutation_bytes = 10 + + def task_routine(): + with instance._capacity_condition: + instance._capacity_condition.wait_for( + lambda: instance._has_capacity(1, 1) + ) + + import threading + + thread = threading.Thread(target=task_routine) + thread.start() + task_alive = thread.is_alive + CrossSync._Sync_Impl.sleep(0.05) + assert task_alive() is True + mutation = self._make_mutation(count=0, size=5) + instance.remove_from_flow([mutation]) + CrossSync._Sync_Impl.sleep(0.05) + assert instance._in_flight_mutation_count == 10 + assert instance._in_flight_mutation_bytes == 5 + assert task_alive() is True + instance._in_flight_mutation_bytes = 10 + mutation = self._make_mutation(count=5, size=0) + instance.remove_from_flow([mutation]) + CrossSync._Sync_Impl.sleep(0.05) + assert instance._in_flight_mutation_count == 5 + assert instance._in_flight_mutation_bytes == 10 + assert task_alive() is True + instance._in_flight_mutation_count = 10 + mutation = self._make_mutation(count=5, size=5) + instance.remove_from_flow([mutation]) + CrossSync._Sync_Impl.sleep(0.05) + assert instance._in_flight_mutation_count == 5 + assert instance._in_flight_mutation_bytes == 5 + assert task_alive() is False + + @pytest.mark.parametrize( + "mutations,count_cap,size_cap,expected_results", + [ + ([(5, 5), (1, 1), (1, 1)], 10, 10, [[(5, 5), (1, 1), (1, 1)]]), + ([(1, 1), (1, 1), (1, 1)], 1, 1, [[(1, 1)], [(1, 1)], [(1, 1)]]), + ([(1, 1), (1, 1), (1, 1)], 2, 10, [[(1, 1), (1, 1)], [(1, 1)]]), + ([(1, 1), (1, 1), (1, 1)], 10, 2, [[(1, 1), (1, 1)], [(1, 1)]]), + ( + [(1, 1), (5, 5), (4, 1), (1, 4), (1, 1)], + 5, + 5, + [[(1, 1)], [(5, 5)], [(4, 1), (1, 4)], [(1, 1)]], + ), + ], + ) + def test_add_to_flow(self, mutations, count_cap, size_cap, expected_results): + """Test batching with various flow control settings""" + mutation_objs = [self._make_mutation(count=m[0], size=m[1]) for m in mutations] + instance = self._make_one(count_cap, size_cap) + i = 0 + for batch in instance.add_to_flow(mutation_objs): + expected_batch = expected_results[i] + assert len(batch) == len(expected_batch) + for j in range(len(expected_batch)): + assert len(batch[j].mutations) == expected_batch[j][0] + assert batch[j].size() == expected_batch[j][1] + instance.remove_from_flow(batch) + i += 1 + assert i == len(expected_results) + + @pytest.mark.parametrize( + "mutations,max_limit,expected_results", + [ + ([(1, 1)] * 11, 10, [[(1, 1)] * 10, [(1, 1)]]), + ([(1, 1)] * 10, 1, [[(1, 1)] for _ in range(10)]), + ([(1, 1)] * 10, 2, [[(1, 1), (1, 1)] for _ in range(5)]), + ], + ) + def test_add_to_flow_max_mutation_limits( + self, mutations, max_limit, expected_results + ): + """Test flow control running up against the max API limit + Should submit request early, even if the flow control has room for more""" + subpath = "_async" if CrossSync._Sync_Impl.is_async else "_sync_autogen" + path = 
f"google.cloud.bigtable.data.{subpath}.mutations_batcher._MUTATE_ROWS_REQUEST_MUTATION_LIMIT" + with mock.patch(path, max_limit): + mutation_objs = [ + self._make_mutation(count=m[0], size=m[1]) for m in mutations + ] + instance = self._make_one(float("inf"), float("inf")) + i = 0 + for batch in instance.add_to_flow(mutation_objs): + expected_batch = expected_results[i] + assert len(batch) == len(expected_batch) + for j in range(len(expected_batch)): + assert len(batch[j].mutations) == expected_batch[j][0] + assert batch[j].size() == expected_batch[j][1] + instance.remove_from_flow(batch) + i += 1 + assert i == len(expected_results) + + def test_add_to_flow_oversize(self): + """mutations over the flow control limits should still be accepted""" + instance = self._make_one(2, 3) + large_size_mutation = self._make_mutation(count=1, size=10) + large_count_mutation = self._make_mutation(count=10, size=1) + results = [out for out in instance.add_to_flow([large_size_mutation])] + assert len(results) == 1 + instance.remove_from_flow(results[0]) + count_results = [out for out in instance.add_to_flow(large_count_mutation)] + assert len(count_results) == 1 + + +class TestMutationsBatcher: + def _get_target_class(self): + return CrossSync._Sync_Impl.MutationsBatcher + + def _make_one(self, table=None, **kwargs): + from google.api_core.exceptions import DeadlineExceeded + from google.api_core.exceptions import ServiceUnavailable + + if table is None: + table = mock.Mock() + table._request_path = {"table_name": "table"} + table.app_profile_id = None + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 10 + table.default_mutate_rows_retryable_errors = ( + DeadlineExceeded, + ServiceUnavailable, + ) + return self._get_target_class()(table, **kwargs) + + @staticmethod + def _make_mutation(count=1, size=1): + mutation = RowMutationEntry("k", DeleteAllFromRow()) + mutation.size = lambda: size + mutation.mutations = [DeleteAllFromRow() for _ in range(count)] + return mutation + + def test_ctor_defaults(self): + with mock.patch.object( + self._get_target_class(), + "_timer_routine", + return_value=CrossSync._Sync_Impl.Future(), + ) as flush_timer_mock: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + table.default_mutate_rows_retryable_errors = [Exception] + with self._make_one(table) as instance: + assert instance._target == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._max_mutation_count == 100000 + assert instance._flow_control._max_mutation_bytes == 104857600 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert ( + instance._operation_timeout + == table.default_mutate_rows_operation_timeout + ) + assert ( + instance._attempt_timeout + == table.default_mutate_rows_attempt_timeout + ) + assert ( + instance._retryable_errors + == table.default_mutate_rows_retryable_errors + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == 5 + assert isinstance(instance._flush_timer, 
CrossSync._Sync_Impl.Future) + + def test_ctor_explicit(self): + """Test with explicit parameters""" + with mock.patch.object( + self._get_target_class(), + "_timer_routine", + return_value=CrossSync._Sync_Impl.Future(), + ) as flush_timer_mock: + table = mock.Mock() + flush_interval = 20 + flush_limit_count = 17 + flush_limit_bytes = 19 + flow_control_max_mutation_count = 1001 + flow_control_max_bytes = 12 + operation_timeout = 11 + attempt_timeout = 2 + retryable_errors = [Exception] + with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + flow_control_max_mutation_count=flow_control_max_mutation_count, + flow_control_max_bytes=flow_control_max_bytes, + batch_operation_timeout=operation_timeout, + batch_attempt_timeout=attempt_timeout, + batch_retryable_errors=retryable_errors, + ) as instance: + assert instance._target == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert ( + instance._flow_control._max_mutation_count + == flow_control_max_mutation_count + ) + assert ( + instance._flow_control._max_mutation_bytes == flow_control_max_bytes + ) + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert instance._operation_timeout == operation_timeout + assert instance._attempt_timeout == attempt_timeout + assert instance._retryable_errors == retryable_errors + CrossSync._Sync_Impl.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == flush_interval + assert isinstance(instance._flush_timer, CrossSync._Sync_Impl.Future) + + def test_ctor_no_flush_limits(self): + """Test with None for flush limits""" + with mock.patch.object( + self._get_target_class(), + "_timer_routine", + return_value=CrossSync._Sync_Impl.Future(), + ) as flush_timer_mock: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + table.default_mutate_rows_retryable_errors = () + flush_interval = None + flush_limit_count = None + flush_limit_bytes = None + with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + ) as instance: + assert instance._target == table + assert instance.closed is False + assert instance._staged_entries == [] + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + CrossSync._Sync_Impl.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] is None + assert isinstance(instance._flush_timer, CrossSync._Sync_Impl.Future) + + def test_ctor_invalid_values(self): + """Test that timeout values are positive, and fit within expected limits""" + with pytest.raises(ValueError) as e: + self._make_one(batch_operation_timeout=-1) + assert 
"operation_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._make_one(batch_attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + + def test_default_argument_consistency(self): + """We supply default arguments in MutationsBatcherAsync.__init__, and in + table.mutations_batcher. Make sure any changes to defaults are applied to + both places""" + import inspect + + get_batcher_signature = dict( + inspect.signature(CrossSync._Sync_Impl.Table.mutations_batcher).parameters + ) + get_batcher_signature.pop("self") + batcher_init_signature = dict( + inspect.signature(self._get_target_class()).parameters + ) + batcher_init_signature.pop("table") + assert len(get_batcher_signature.keys()) == len(batcher_init_signature.keys()) + assert len(get_batcher_signature) == 8 + assert set(get_batcher_signature.keys()) == set(batcher_init_signature.keys()) + for arg_name in get_batcher_signature.keys(): + assert ( + get_batcher_signature[arg_name].default + == batcher_init_signature[arg_name].default + ) + + @pytest.mark.parametrize("input_val", [None, 0, -1]) + def test__start_flush_timer_w_empty_input(self, input_val): + """Empty/invalid timer should return immediately""" + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + with self._make_one() as instance: + (sleep_obj, sleep_method) = (instance._closed, "wait") + with mock.patch.object(sleep_obj, sleep_method) as sleep_mock: + result = instance._timer_routine(input_val) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 + assert result is None + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test__start_flush_timer_call_when_closed(self): + """closed batcher's timer should return immediately""" + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + with self._make_one() as instance: + instance.close() + flush_mock.reset_mock() + (sleep_obj, sleep_method) = (instance._closed, "wait") + with mock.patch.object(sleep_obj, sleep_method) as sleep_mock: + instance._timer_routine(10) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 + + @pytest.mark.parametrize("num_staged", [0, 1, 10]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test__flush_timer(self, num_staged): + """Timer should continue to call _schedule_flush in a loop""" + from google.cloud.bigtable.data._cross_sync import CrossSync + + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + expected_sleep = 12 + with self._make_one(flush_interval=expected_sleep) as instance: + loop_num = 3 + instance._staged_entries = [mock.Mock()] * num_staged + with mock.patch.object( + CrossSync._Sync_Impl, "event_wait" + ) as sleep_mock: + sleep_mock.side_effect = [None] * loop_num + [TabError("expected")] + with pytest.raises(TabError): + self._get_target_class()._timer_routine( + instance, expected_sleep + ) + assert sleep_mock.call_count == loop_num + 1 + sleep_kwargs = sleep_mock.call_args[1] + assert sleep_kwargs["timeout"] == expected_sleep + assert flush_mock.call_count == (0 if num_staged == 0 else loop_num) + + def test__flush_timer_close(self): + """Timer should continue terminate after close""" + with mock.patch.object(self._get_target_class(), "_schedule_flush"): + with self._make_one() as instance: + assert instance._flush_timer.done() is False + instance.close() + assert instance._flush_timer.done() is True + + def test_append_closed(self): 
+ """Should raise exception""" + instance = self._make_one() + instance.close() + with pytest.raises(RuntimeError): + instance.append(mock.Mock()) + + def test_append_wrong_mutation(self): + """Mutation objects should raise an exception. + Only support RowMutationEntry""" + from google.cloud.bigtable.data.mutations import DeleteAllFromRow + + with self._make_one() as instance: + expected_error = "invalid mutation type: DeleteAllFromRow. Only RowMutationEntry objects are supported by batcher" + with pytest.raises(ValueError) as e: + instance.append(DeleteAllFromRow()) + assert str(e.value) == expected_error + + def test_append_outside_flow_limits(self): + """entries larger than mutation limits are still processed""" + with self._make_one( + flow_control_max_mutation_count=1, flow_control_max_bytes=1 + ) as instance: + oversized_entry = self._make_mutation(count=0, size=2) + instance.append(oversized_entry) + assert instance._staged_entries == [oversized_entry] + assert instance._staged_count == 0 + assert instance._staged_bytes == 2 + instance._staged_entries = [] + with self._make_one( + flow_control_max_mutation_count=1, flow_control_max_bytes=1 + ) as instance: + overcount_entry = self._make_mutation(count=2, size=0) + instance.append(overcount_entry) + assert instance._staged_entries == [overcount_entry] + assert instance._staged_count == 2 + assert instance._staged_bytes == 0 + instance._staged_entries = [] + + def test_append_flush_runs_after_limit_hit(self): + """If the user appends a bunch of entries above the flush limits back-to-back, + it should still flush in a single task""" + with mock.patch.object( + self._get_target_class(), "_execute_mutate_rows" + ) as op_mock: + with self._make_one(flush_limit_bytes=100) as instance: + + def mock_call(*args, **kwargs): + return [] + + op_mock.side_effect = mock_call + instance.append(self._make_mutation(size=99)) + num_entries = 10 + for _ in range(num_entries): + instance.append(self._make_mutation(size=1)) + instance._wait_for_batch_results(*instance._flush_jobs) + assert op_mock.call_count == 1 + sent_batch = op_mock.call_args[0][0] + assert len(sent_batch) == 2 + assert len(instance._staged_entries) == num_entries - 1 + + @pytest.mark.parametrize( + "flush_count,flush_bytes,mutation_count,mutation_bytes,expect_flush", + [ + (10, 10, 1, 1, False), + (10, 10, 9, 9, False), + (10, 10, 10, 1, True), + (10, 10, 1, 10, True), + (10, 10, 10, 10, True), + (1, 1, 10, 10, True), + (1, 1, 0, 0, False), + ], + ) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_append( + self, flush_count, flush_bytes, mutation_count, mutation_bytes, expect_flush + ): + """test appending different mutations, and checking if it causes a flush""" + with self._make_one( + flush_limit_mutation_count=flush_count, flush_limit_bytes=flush_bytes + ) as instance: + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert instance._staged_entries == [] + mutation = self._make_mutation(count=mutation_count, size=mutation_bytes) + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + instance.append(mutation) + assert flush_mock.call_count == bool(expect_flush) + assert instance._staged_count == mutation_count + assert instance._staged_bytes == mutation_bytes + assert instance._staged_entries == [mutation] + instance._staged_entries = [] + + def test_append_multiple_sequentially(self): + """Append multiple mutations""" + with self._make_one( + flush_limit_mutation_count=8, flush_limit_bytes=8 + ) as instance: + assert 
instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert instance._staged_entries == [] + mutation = self._make_mutation(count=2, size=3) + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + instance.append(mutation) + assert flush_mock.call_count == 0 + assert instance._staged_count == 2 + assert instance._staged_bytes == 3 + assert len(instance._staged_entries) == 1 + instance.append(mutation) + assert flush_mock.call_count == 0 + assert instance._staged_count == 4 + assert instance._staged_bytes == 6 + assert len(instance._staged_entries) == 2 + instance.append(mutation) + assert flush_mock.call_count == 1 + assert instance._staged_count == 6 + assert instance._staged_bytes == 9 + assert len(instance._staged_entries) == 3 + instance._staged_entries = [] + + def test_flush_flow_control_concurrent_requests(self): + """requests should happen in parallel if flow control breaks up single flush into batches""" + import time + + num_calls = 10 + fake_mutations = [self._make_mutation(count=1) for _ in range(num_calls)] + with self._make_one(flow_control_max_mutation_count=1) as instance: + with mock.patch.object( + instance, "_execute_mutate_rows", CrossSync._Sync_Impl.Mock() + ) as op_mock: + + def mock_call(*args, **kwargs): + CrossSync._Sync_Impl.sleep(0.1) + return [] + + op_mock.side_effect = mock_call + start_time = time.monotonic() + instance._staged_entries = fake_mutations + instance._schedule_flush() + CrossSync._Sync_Impl.sleep(0.01) + for i in range(num_calls): + instance._flow_control.remove_from_flow( + [self._make_mutation(count=1)] + ) + CrossSync._Sync_Impl.sleep(0.01) + instance._wait_for_batch_results(*instance._flush_jobs) + duration = time.monotonic() - start_time + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert duration < 0.5 + assert op_mock.call_count == num_calls + + def test_schedule_flush_no_mutations(self): + """schedule flush should return None if no staged mutations""" + with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal") as flush_mock: + for i in range(3): + assert instance._schedule_flush() is None + assert flush_mock.call_count == 0 + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_schedule_flush_with_mutations(self): + """if new mutations exist, should add a new flush task to _flush_jobs""" + with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal") as flush_mock: + flush_mock.side_effect = lambda x: time.sleep(0.1) + for i in range(1, 4): + mutation = mock.Mock() + instance._staged_entries = [mutation] + instance._schedule_flush() + assert instance._staged_entries == [] + asyncio.sleep(0) + assert instance._staged_entries == [] + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert flush_mock.call_count == 1 + flush_mock.reset_mock() + + def test__flush_internal(self): + """_flush_internal should: + - await previous flush call + - delegate batching to _flow_control + - call _execute_mutate_rows on each batch + - update self.exceptions and self._entries_processed_since_last_raise""" + num_entries = 10 + with self._make_one() as instance: + with mock.patch.object(instance, "_execute_mutate_rows") as execute_mock: + with mock.patch.object( + instance._flow_control, "add_to_flow" + ) as flow_mock: + + def gen(x): + yield x + + flow_mock.side_effect = lambda x: gen(x) + mutations = [self._make_mutation(count=1, size=1)] * num_entries + 
instance._flush_internal(mutations) + assert instance._entries_processed_since_last_raise == num_entries + assert execute_mock.call_count == 1 + assert flow_mock.call_count == 1 + instance._oldest_exceptions.clear() + instance._newest_exceptions.clear() + + def test_flush_clears_job_list(self): + """a job should be added to _flush_jobs when _schedule_flush is called, + and removed when it completes""" + with self._make_one() as instance: + with mock.patch.object( + instance, "_flush_internal", CrossSync._Sync_Impl.Mock() + ) as flush_mock: + flush_mock.side_effect = lambda x: time.sleep(0.1) + mutations = [self._make_mutation(count=1, size=1)] + instance._staged_entries = mutations + assert instance._flush_jobs == set() + new_job = instance._schedule_flush() + assert instance._flush_jobs == {new_job} + new_job.result() + assert instance._flush_jobs == set() + + @pytest.mark.parametrize( + "num_starting,num_new_errors,expected_total_errors", + [ + (0, 0, 0), + (0, 1, 1), + (0, 2, 2), + (1, 0, 1), + (1, 1, 2), + (10, 2, 12), + (10, 20, 20), + ], + ) + def test__flush_internal_with_errors( + self, num_starting, num_new_errors, expected_total_errors + ): + """errors returned from _execute_mutate_rows should be added to internal exceptions""" + from google.cloud.bigtable.data import exceptions + + num_entries = 10 + expected_errors = [ + exceptions.FailedMutationEntryError(mock.Mock(), mock.Mock(), ValueError()) + ] * num_new_errors + with self._make_one() as instance: + instance._oldest_exceptions = [mock.Mock()] * num_starting + with mock.patch.object(instance, "_execute_mutate_rows") as execute_mock: + execute_mock.return_value = expected_errors + with mock.patch.object( + instance._flow_control, "add_to_flow" + ) as flow_mock: + + def gen(x): + yield x + + flow_mock.side_effect = lambda x: gen(x) + mutations = [self._make_mutation(count=1, size=1)] * num_entries + instance._flush_internal(mutations) + assert instance._entries_processed_since_last_raise == num_entries + assert execute_mock.call_count == 1 + assert flow_mock.call_count == 1 + found_exceptions = instance._oldest_exceptions + list( + instance._newest_exceptions + ) + assert len(found_exceptions) == expected_total_errors + for i in range(num_starting, expected_total_errors): + assert found_exceptions[i] == expected_errors[i - num_starting] + assert found_exceptions[i].index is None + instance._oldest_exceptions.clear() + instance._newest_exceptions.clear() + + def _mock_gapic_return(self, num=5): + from google.cloud.bigtable_v2.types import MutateRowsResponse + from google.rpc import status_pb2 + + def gen(num): + for i in range(num): + entry = MutateRowsResponse.Entry( + index=i, status=status_pb2.Status(code=0) + ) + yield MutateRowsResponse(entries=[entry]) + + return gen(num) + + def test_timer_flush_end_to_end(self): + """Flush should automatically trigger after flush_interval""" + num_mutations = 10 + mutations = [self._make_mutation(count=2, size=2)] * num_mutations + with self._make_one(flush_interval=0.05) as instance: + instance._target.default_operation_timeout = 10 + instance._target.default_attempt_timeout = 9 + with mock.patch.object( + instance._target.client._gapic_client, "mutate_rows" + ) as gapic_mock: + gapic_mock.side_effect = ( + lambda *args, **kwargs: self._mock_gapic_return(num_mutations) + ) + for m in mutations: + instance.append(m) + assert instance._entries_processed_since_last_raise == 0 + CrossSync._Sync_Impl.sleep(0.1) + assert instance._entries_processed_since_last_raise == num_mutations + + def 
test__execute_mutate_rows(self): + with mock.patch.object( + CrossSync._Sync_Impl, "_MutateRowsOperation" + ) as mutate_rows: + mutate_rows.return_value = CrossSync._Sync_Impl.Mock() + start_operation = mutate_rows().start + table = mock.Mock() + table.table_name = "test-table" + table.app_profile_id = "test-app-profile" + table.default_mutate_rows_operation_timeout = 17 + table.default_mutate_rows_attempt_timeout = 13 + table.default_mutate_rows_retryable_errors = () + with self._make_one(table) as instance: + batch = [self._make_mutation()] + result = instance._execute_mutate_rows(batch) + assert start_operation.call_count == 1 + (args, kwargs) = mutate_rows.call_args + assert args[0] == table.client._gapic_client + assert args[1] == table + assert args[2] == batch + assert kwargs["operation_timeout"] == 17 + assert kwargs["attempt_timeout"] == 13 + assert result == [] + + def test__execute_mutate_rows_returns_errors(self): + """Errors from the operation should be returned as a list""" + from google.cloud.bigtable.data.exceptions import ( + MutationsExceptionGroup, + FailedMutationEntryError, + ) + + with mock.patch.object( + CrossSync._Sync_Impl._MutateRowsOperation, "start" + ) as mutate_rows: + err1 = FailedMutationEntryError(0, mock.Mock(), RuntimeError("test error")) + err2 = FailedMutationEntryError(1, mock.Mock(), RuntimeError("test error")) + mutate_rows.side_effect = MutationsExceptionGroup([err1, err2], 10) + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 17 + table.default_mutate_rows_attempt_timeout = 13 + table.default_mutate_rows_retryable_errors = () + with self._make_one(table) as instance: + batch = [self._make_mutation()] + result = instance._execute_mutate_rows(batch) + assert len(result) == 2 + assert result[0] == err1 + assert result[1] == err2 + assert result[0].index is None + assert result[1].index is None + + def test__raise_exceptions(self): + """Raise exceptions and reset error state""" + from google.cloud.bigtable.data import exceptions + + expected_total = 1201 + expected_exceptions = [RuntimeError("mock")] * 3 + with self._make_one() as instance: + instance._oldest_exceptions = expected_exceptions + instance._entries_processed_since_last_raise = expected_total + try: + instance._raise_exceptions() + except exceptions.MutationsExceptionGroup as exc: + assert list(exc.exceptions) == expected_exceptions + assert str(expected_total) in str(exc) + assert instance._entries_processed_since_last_raise == 0 + (instance._oldest_exceptions, instance._newest_exceptions) = ([], []) + instance._raise_exceptions() + + def test___enter__(self): + """Should return self""" + with self._make_one() as instance: + assert instance.__enter__() == instance + + def test___exit__(self): + """__exit__ should call close""" + with self._make_one() as instance: + with mock.patch.object(instance, "close") as close_mock: + instance.__exit__(None, None, None) + assert close_mock.call_count == 1 + + def test_close(self): + """Should clean up all resources""" + with self._make_one() as instance: + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + with mock.patch.object(instance, "_raise_exceptions") as raise_mock: + instance.close() + assert instance.closed is True + assert instance._flush_timer.done() is True + assert instance._flush_jobs == set() + assert flush_mock.call_count == 1 + assert raise_mock.call_count == 1 + + def test_close_w_exceptions(self): + """Raise exceptions on close""" + from google.cloud.bigtable.data import exceptions + + expected_total = 10 + 
expected_exceptions = [RuntimeError("mock")] + with self._make_one() as instance: + instance._oldest_exceptions = expected_exceptions + instance._entries_processed_since_last_raise = expected_total + try: + instance.close() + except exceptions.MutationsExceptionGroup as exc: + assert list(exc.exceptions) == expected_exceptions + assert str(expected_total) in str(exc) + assert instance._entries_processed_since_last_raise == 0 + (instance._oldest_exceptions, instance._newest_exceptions) = ([], []) + + def test__on_exit(self, recwarn): + """Should raise warnings if unflushed mutations exist""" + with self._make_one() as instance: + instance._on_exit() + assert len(recwarn) == 0 + num_left = 4 + instance._staged_entries = [mock.Mock()] * num_left + with pytest.warns(UserWarning) as w: + instance._on_exit() + assert len(w) == 1 + assert "unflushed mutations" in str(w[0].message).lower() + assert str(num_left) in str(w[0].message) + instance._closed.set() + instance._on_exit() + assert len(recwarn) == 0 + instance._staged_entries = [] + + def test_atexit_registration(self): + """Should run _on_exit on program termination""" + import atexit + + with mock.patch.object(atexit, "register") as register_mock: + assert register_mock.call_count == 0 + with self._make_one(): + assert register_mock.call_count == 1 + + def test_timeout_args_passed(self): + """batch_operation_timeout and batch_attempt_timeout should be used + in api calls""" + with mock.patch.object( + CrossSync._Sync_Impl, + "_MutateRowsOperation", + return_value=CrossSync._Sync_Impl.Mock(), + ) as mutate_rows: + expected_operation_timeout = 17 + expected_attempt_timeout = 13 + with self._make_one( + batch_operation_timeout=expected_operation_timeout, + batch_attempt_timeout=expected_attempt_timeout, + ) as instance: + assert instance._operation_timeout == expected_operation_timeout + assert instance._attempt_timeout == expected_attempt_timeout + instance._execute_mutate_rows([self._make_mutation()]) + assert mutate_rows.call_count == 1 + kwargs = mutate_rows.call_args[1] + assert kwargs["operation_timeout"] == expected_operation_timeout + assert kwargs["attempt_timeout"] == expected_attempt_timeout + + @pytest.mark.parametrize( + "limit,in_e,start_e,end_e", + [ + (10, 0, (10, 0), (10, 0)), + (1, 10, (0, 0), (1, 1)), + (10, 1, (0, 0), (1, 0)), + (10, 10, (0, 0), (10, 0)), + (10, 11, (0, 0), (10, 1)), + (3, 20, (0, 0), (3, 3)), + (10, 20, (0, 0), (10, 10)), + (10, 21, (0, 0), (10, 10)), + (2, 1, (2, 0), (2, 1)), + (2, 1, (1, 0), (2, 0)), + (2, 2, (1, 0), (2, 1)), + (3, 1, (3, 1), (3, 2)), + (3, 3, (3, 1), (3, 3)), + (1000, 5, (999, 0), (1000, 4)), + (1000, 5, (0, 0), (5, 0)), + (1000, 5, (1000, 0), (1000, 5)), + ], + ) + def test__add_exceptions(self, limit, in_e, start_e, end_e): + """Test that the _add_exceptions function properly updates the + _oldest_exceptions and _newest_exceptions lists + Args: + - limit: the _exception_list_limit representing the max size of either list + - in_e: size of list of exceptions to send to _add_exceptions + - start_e: a tuple of ints representing the initial sizes of _oldest_exceptions and _newest_exceptions + - end_e: a tuple of ints representing the expected sizes of _oldest_exceptions and _newest_exceptions + """ + from collections import deque + + input_list = [RuntimeError(f"mock {i}") for i in range(in_e)] + mock_batcher = mock.Mock() + mock_batcher._oldest_exceptions = [ + RuntimeError(f"starting mock {i}") for i in range(start_e[0]) + ] + mock_batcher._newest_exceptions = deque( + 
[RuntimeError(f"starting mock {i}") for i in range(start_e[1])], + maxlen=limit, + ) + mock_batcher._exception_list_limit = limit + mock_batcher._exceptions_since_last_raise = 0 + self._get_target_class()._add_exceptions(mock_batcher, input_list) + assert len(mock_batcher._oldest_exceptions) == end_e[0] + assert len(mock_batcher._newest_exceptions) == end_e[1] + assert mock_batcher._exceptions_since_last_raise == in_e + oldest_list_diff = end_e[0] - start_e[0] + newest_list_diff = min(max(in_e - oldest_list_diff, 0), limit) + for i in range(oldest_list_diff): + assert mock_batcher._oldest_exceptions[i + start_e[0]] == input_list[i] + for i in range(1, newest_list_diff + 1): + assert mock_batcher._newest_exceptions[-i] == input_list[-i] + + @pytest.mark.parametrize( + "input_retryables,expected_retryables", + [ + ( + TABLE_DEFAULT.READ_ROWS, + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + core_exceptions.Cancelled, + ], + ), + ( + TABLE_DEFAULT.DEFAULT, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ( + TABLE_DEFAULT.MUTATE_ROWS, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ([], []), + ([4], [core_exceptions.DeadlineExceeded]), + ], + ) + def test_customizable_retryable_errors(self, input_retryables, expected_retryables): + """Test that retryable functions support user-configurable arguments, and that the configured retryables are passed + down to the gapic layer.""" + with mock.patch.object( + google.api_core.retry, "if_exception_type" + ) as predicate_builder_mock: + with mock.patch.object( + CrossSync._Sync_Impl, "retry_target" + ) as retry_fn_mock: + table = None + with mock.patch("asyncio.create_task"): + table = CrossSync._Sync_Impl.Table(mock.Mock(), "instance", "table") + with self._make_one( + table, batch_retryable_errors=input_retryables + ) as instance: + assert instance._retryable_errors == expected_retryables + expected_predicate = expected_retryables.__contains__ + predicate_builder_mock.return_value = expected_predicate + retry_fn_mock.side_effect = RuntimeError("stop early") + mutation = self._make_mutation(count=1, size=1) + instance._execute_mutate_rows([mutation]) + predicate_builder_mock.assert_called_once_with( + *expected_retryables, _MutateRowsIncomplete + ) + retry_call_args = retry_fn_mock.call_args_list[0].args + assert retry_call_args[1] is expected_predicate + + def test_large_batch_write(self): + """Test that a large batch of mutations can be written""" + import math + + num_mutations = 10000 + flush_limit = 1000 + mutations = [self._make_mutation(count=1, size=1)] * num_mutations + with self._make_one(flush_limit_mutation_count=flush_limit) as instance: + operation_mock = mock.Mock() + rpc_call_mock = CrossSync._Sync_Impl.Mock() + operation_mock().start = rpc_call_mock + CrossSync._Sync_Impl._MutateRowsOperation = operation_mock + for m in mutations: + instance.append(m) + expected_calls = math.ceil(num_mutations / flush_limit) + assert rpc_call_mock.call_count == expected_calls + assert instance._entries_processed_since_last_raise == num_mutations + assert len(instance._staged_entries) == 0 diff --git a/tests/unit/data/_sync_autogen/test_read_rows_acceptance.py b/tests/unit/data/_sync_autogen/test_read_rows_acceptance.py new file mode 100644 index 000000000..8ceb0daf7 --- /dev/null +++ b/tests/unit/data/_sync_autogen/test_read_rows_acceptance.py @@ -0,0 +1,328 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is automatically generated by CrossSync. Do not edit manually. + +from __future__ import annotations +import os +import warnings +import pytest +import mock +from itertools import zip_longest +from google.cloud.bigtable_v2 import ReadRowsResponse +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.row import Row +from ...v2_client.test_row_merger import ReadRowsTest, TestFile +from google.cloud.bigtable.data._cross_sync import CrossSync + + +class TestReadRowsAcceptance: + @staticmethod + def _get_operation_class(): + return CrossSync._Sync_Impl._ReadRowsOperation + + @staticmethod + def _get_client_class(): + return CrossSync._Sync_Impl.DataClient + + def parse_readrows_acceptance_tests(): + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, "../read-rows-acceptance-test.json") + with open(filename) as json_file: + test_json = TestFile.from_json(json_file.read()) + return test_json.read_rows_tests + + @staticmethod + def extract_results_from_row(row: Row): + results = [] + for family, col, cells in row.items(): + for cell in cells: + results.append( + ReadRowsTest.Result( + row_key=row.row_key, + family_name=family, + qualifier=col, + timestamp_micros=cell.timestamp_ns // 1000, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + ) + return results + + @staticmethod + def _coro_wrapper(stream): + return stream + + def _process_chunks(self, *chunks): + def _row_stream(): + yield ReadRowsResponse(chunks=chunks) + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = None + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_row_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + results = [] + for row in merger: + results.append(row) + return results + + @pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description + ) + def test_row_merger_scenario(self, test_case: ReadRowsTest): + def _scenario_stream(): + for chunk in test_case.chunks: + yield ReadRowsResponse(chunks=[chunk]) + + try: + results = [] + instance = mock.Mock() + instance._last_yielded_row_key = None + instance._remaining_count = None + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_scenario_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + for row in merger: + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + @pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description + ) + def 
test_read_rows_scenario(self, test_case: ReadRowsTest): + def _make_gapic_stream(chunk_list: list[ReadRowsResponse]): + from google.cloud.bigtable_v2 import ReadRowsResponse + + class mock_stream: + def __init__(self, chunk_list): + self.chunk_list = chunk_list + self.idx = -1 + + def __aiter__(self): + return self + + def __iter__(self): + return self + + def __anext__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + chunk = self.chunk_list[self.idx] + return ReadRowsResponse(chunks=[chunk]) + raise CrossSync._Sync_Impl.StopIteration + + def __next__(self): + return self.__anext__() + + def cancel(self): + pass + + return mock_stream(chunk_list) + + with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + client = self._get_client_class()() + try: + table = client.get_table("instance", "table") + results = [] + with mock.patch.object( + table.client._gapic_client, "read_rows" + ) as read_rows: + read_rows.return_value = _make_gapic_stream(test_case.chunks) + for row in table.read_rows_stream(query={}): + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + finally: + client.close() + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + def test_out_of_order_rows(self): + def _row_stream(): + yield ReadRowsResponse(last_scanned_row_key=b"a") + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = b"b" + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_row_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + with pytest.raises(InvalidChunk): + for _ in merger: + pass + + def test_bare_reset(self): + first_chunk = ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk( + row_key=b"a", family_name="f", qualifier=b"q", value=b"v" + ) + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, row_key=b"a") + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, family_name="f") + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, qualifier=b"q") + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, timestamp_micros=1000) + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, labels=["a"]) + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, value=b"v") + ), + ) + + def test_missing_family(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + qualifier=b"q", + timestamp_micros=1000, + value=b"v", + commit_row=True, + ) + ) + + def test_mid_cell_row_key_change(self): + with 
pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(row_key=b"b", value=b"v", commit_row=True), + ) + + def test_mid_cell_family_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + family_name="f2", value=b"v", commit_row=True + ), + ) + + def test_mid_cell_qualifier_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + qualifier=b"q2", value=b"v", commit_row=True + ), + ) + + def test_mid_cell_timestamp_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + timestamp_micros=2000, value=b"v", commit_row=True + ), + ) + + def test_mid_cell_labels_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(labels=["b"], value=b"v", commit_row=True), + ) diff --git a/tests/unit/data/execute_query/__init__.py b/tests/unit/data/execute_query/__init__.py new file mode 100644 index 000000000..6d5e14bcf --- /dev/null +++ b/tests/unit/data/execute_query/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/unit/data/execute_query/_async/__init__.py b/tests/unit/data/execute_query/_async/__init__.py new file mode 100644 index 000000000..6d5e14bcf --- /dev/null +++ b/tests/unit/data/execute_query/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tests/unit/data/execute_query/_async/test_query_iterator.py b/tests/unit/data/execute_query/_async/test_query_iterator.py new file mode 100644 index 000000000..df6321f7f --- /dev/null +++ b/tests/unit/data/execute_query/_async/test_query_iterator.py @@ -0,0 +1,407 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +from google.cloud.bigtable.data import exceptions +from google.cloud.bigtable.data.execute_query.metadata import ( + _pb_metadata_to_metadata_types, +) +import pytest +import concurrent.futures +from ..sql_helpers import ( + chunked_responses, + int_val, + column, + metadata, + int64_type, +) + +from google.cloud.bigtable.data._cross_sync import CrossSync + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + + +__CROSS_SYNC_OUTPUT__ = ( + "tests.unit.data.execute_query._sync_autogen.test_query_iterator" +) + + +@CrossSync.convert_class(sync_name="MockIterator") +class MockIterator: + def __init__(self, values, delay=None): + self._values = values + self.idx = 0 + self._delay = delay + + @CrossSync.convert(sync_name="__iter__") + def __aiter__(self): + return self + + @CrossSync.convert(sync_name="__next__") + async def __anext__(self): + if self.idx >= len(self._values): + raise CrossSync.StopIteration + if self._delay is not None: + await CrossSync.sleep(self._delay) + value = self._values[self.idx] + self.idx += 1 + return value + + +@CrossSync.convert_class(sync_name="TestQueryIterator") +class TestQueryIteratorAsync: + @staticmethod + def _target_class(): + return CrossSync.ExecuteQueryIterator + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + @pytest.fixture + def proto_byte_stream(self): + stream = [ + *chunked_responses(2, int_val(1), int_val(2), token=b"token1"), + *chunked_responses(3, int_val(3), int_val(4), token=b"token2"), + *chunked_responses(1, int_val(5), int_val(6), token=b"token3"), + ] + return stream + + @CrossSync.pytest + async def test_iterator(self, proto_byte_stream): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + client_mock._executor = concurrent.futures.ThreadPoolExecutor() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + result = [] + async for value in iterator: + result.append(tuple(value)) + assert result == [(1, 2), (3, 4), (5, 6)] + + assert iterator.is_closed + 
client_mock._register_instance.assert_called_once() + client_mock._remove_instance_registration.assert_called_once() + + assert mock_async_iterator.idx == len(proto_byte_stream) + + @CrossSync.pytest + async def test_iterator_returns_metadata_after_data(self, proto_byte_stream): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + + await CrossSync.next(iterator) + assert len(iterator.metadata) == 2 + + assert mock_async_iterator.idx == 2 + + @CrossSync.pytest + async def test_iterator_throws_error_on_close_w_bufferred_data(self): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + stream = [ + *chunked_responses(2, int_val(1), int_val(2), token=b"token1"), + *chunked_responses(3, int_val(3), int_val(4), token=b"token2"), + # Remove the last response, which has the token. We expect this + # to cause the call to close within _next_impl_ to fail + chunked_responses(2, int_val(5), int_val(6), token=b"token3")[0], + ] + mock_async_iterator = MockIterator(stream) + iterator = None + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + i = 0 + async for row in iterator: + i += 1 + if i == 2: + break + with pytest.raises( + ValueError, + match="Unexpected buffered data at end of executeQuery reqest", + ): + await CrossSync.next(iterator) + + @CrossSync.pytest + async def test_iterator_handles_reset(self): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + stream = [ + # Expect this to be dropped by reset + *chunked_responses(2, int_val(1), int_val(2)), + *chunked_responses(3, int_val(3), int_val(4), reset=True), + *chunked_responses(2, int_val(5), int_val(6), reset=False, token=b"token1"), + # Only send first of two responses so that there is no checksum + # expect to be reset + chunked_responses(2, int_val(10), int_val(12))[0], + *chunked_responses(2, int_val(7), int_val(8), token=b"token2"), + ] + mock_async_iterator = MockIterator(stream) + iterator = None + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + 
req_metadata=(), + retryable_excs=[], + ) + results = [] + async for value in iterator: + results.append(value) + assert len(results) == 3 + [row1, row2, row3] = results + assert row1["test1"] == 3 + assert row1["test2"] == 4 + assert row2["test1"] == 5 + assert row2["test2"] == 6 + assert row3["test1"] == 7 + assert row3["test2"] == 8 + + @CrossSync.pytest + async def test_iterator_returns_error_if_metadata_requested_too_early( + self, proto_byte_stream + ): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + + with pytest.raises(exceptions.EarlyMetadataCallError): + iterator.metadata + + @CrossSync.pytest + async def test_iterator_closes_on_full_consumption(self, proto_byte_stream): + """ + Tests that the iterator's close() method is called after all results + have been successfully consumed. + """ + client_mock = mock.Mock() + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + client_mock._executor = concurrent.futures.ThreadPoolExecutor() + mock_async_iterator = MockIterator(proto_byte_stream) + + with mock.patch.object( + CrossSync, "retry_target_stream", return_value=mock_async_iterator + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + # Consume the entire iterator + results = [row async for row in iterator] + assert len(results) == 3 + + # The close method should be called automatically by the finally block + client_mock._remove_instance_registration.assert_called_once() + assert iterator.is_closed + + @CrossSync.pytest + async def test_iterator_closes_on_early_break(self, proto_byte_stream): + """ + Tests that the iterator's close() method is called if the user breaks + out of the iteration loop early. 
+ """ + client_mock = mock.Mock() + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync, "retry_target_stream", return_value=mock_async_iterator + ): + iterator = CrossSync.ExecuteQueryIterator( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + async for _ in iterator: + break + + del iterator + await CrossSync.sleep(1) + # GC outside the loop bc the mock ends up holding a reference to + # the iterator + gc.collect() + await CrossSync.sleep(1) + + # The close method should be called by the finally block when the + # generator is closed + client_mock._remove_instance_registration.assert_called_once() + + @CrossSync.pytest + async def test_iterator_closes_on_error(self, proto_byte_stream): + """ + Tests that the iterator's close() method is called if an exception + is raised during iteration. + """ + client_mock = mock.Mock() + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + + class MockErrorIterator(MockIterator): + @CrossSync.convert( + sync_name="__next__", replace_symbols={"__anext__": "__next__"} + ) + async def __anext__(self): + if self.idx >= 1: + raise ValueError("Injected-test-error") + return await super().__anext__() + + mock_async_iterator = MockErrorIterator(proto_byte_stream) + with mock.patch.object( + CrossSync, "retry_target_stream", return_value=mock_async_iterator + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + with pytest.raises(ValueError, match="Injected-test-error"): + async for _ in iterator: + pass + + # The close method should be called by the finally block on error + client_mock._remove_instance_registration.assert_called_once() diff --git a/tests/unit/data/execute_query/_sync_autogen/__init__.py b/tests/unit/data/execute_query/_sync_autogen/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py b/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py new file mode 100644 index 000000000..3915693cd --- /dev/null +++ b/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py @@ -0,0 +1,353 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+
+import gc
+from google.cloud.bigtable.data import exceptions
+from google.cloud.bigtable.data.execute_query.metadata import (
+    _pb_metadata_to_metadata_types,
+)
+import pytest
+import concurrent.futures
+from ..sql_helpers import chunked_responses, int_val, column, metadata, int64_type
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
+
+class MockIterator:
+    def __init__(self, values, delay=None):
+        self._values = values
+        self.idx = 0
+        self._delay = delay
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.idx >= len(self._values):
+            raise CrossSync._Sync_Impl.StopIteration
+        if self._delay is not None:
+            CrossSync._Sync_Impl.sleep(self._delay)
+        value = self._values[self.idx]
+        self.idx += 1
+        return value
+
+
+class TestQueryIterator:
+    @staticmethod
+    def _target_class():
+        return CrossSync._Sync_Impl.ExecuteQueryIterator
+
+    def _make_one(self, *args, **kwargs):
+        return self._target_class()(*args, **kwargs)
+
+    @pytest.fixture
+    def proto_byte_stream(self):
+        stream = [
+            *chunked_responses(2, int_val(1), int_val(2), token=b"token1"),
+            *chunked_responses(3, int_val(3), int_val(4), token=b"token2"),
+            *chunked_responses(1, int_val(5), int_val(6), token=b"token3"),
+        ]
+        return stream
+
+    def test_iterator(self, proto_byte_stream):
+        client_mock = mock.Mock()
+        client_mock._register_instance = CrossSync._Sync_Impl.Mock()
+        client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock()
+        client_mock._executor = concurrent.futures.ThreadPoolExecutor()
+        mock_async_iterator = MockIterator(proto_byte_stream)
+        iterator = None
+        with mock.patch.object(
+            CrossSync._Sync_Impl,
+            "retry_target_stream",
+            return_value=mock_async_iterator,
+        ):
+            iterator = self._make_one(
+                client=client_mock,
+                instance_id="test-instance",
+                app_profile_id="test_profile",
+                request_body={},
+                prepare_metadata=_pb_metadata_to_metadata_types(
+                    metadata(
+                        column("test1", int64_type()), column("test2", int64_type())
+                    )
+                ),
+                attempt_timeout=10,
+                operation_timeout=10,
+                req_metadata=(),
+                retryable_excs=[],
+            )
+        result = []
+        for value in iterator:
+            result.append(tuple(value))
+        assert result == [(1, 2), (3, 4), (5, 6)]
+        assert iterator.is_closed
+        client_mock._register_instance.assert_called_once()
+        client_mock._remove_instance_registration.assert_called_once()
+        assert mock_async_iterator.idx == len(proto_byte_stream)
+
+    def test_iterator_returns_metadata_after_data(self, proto_byte_stream):
+        client_mock = mock.Mock()
+        client_mock._register_instance = CrossSync._Sync_Impl.Mock()
+        client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock()
+        mock_async_iterator = MockIterator(proto_byte_stream)
+        iterator = None
+        with mock.patch.object(
+            CrossSync._Sync_Impl,
+            "retry_target_stream",
+            return_value=mock_async_iterator,
+        ):
+            iterator = self._make_one(
+                client=client_mock,
+                instance_id="test-instance",
+                app_profile_id="test_profile",
+                request_body={},
+                prepare_metadata=_pb_metadata_to_metadata_types(
+                    metadata(
+                        column("test1", int64_type()), column("test2", int64_type())
+                    )
+                ),
+                attempt_timeout=10,
+                operation_timeout=10,
+                req_metadata=(),
+                retryable_excs=[],
+            )
+        CrossSync._Sync_Impl.next(iterator)
+        assert len(iterator.metadata) == 2
+        assert mock_async_iterator.idx == 2
+
+    def test_iterator_throws_error_on_close_w_buffered_data(self):
+        client_mock = mock.Mock()
+        client_mock._register_instance = CrossSync._Sync_Impl.Mock()
+
client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + stream = [ + *chunked_responses(2, int_val(1), int_val(2), token=b"token1"), + *chunked_responses(3, int_val(3), int_val(4), token=b"token2"), + chunked_responses(2, int_val(5), int_val(6), token=b"token3")[0], + ] + mock_async_iterator = MockIterator(stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + i = 0 + for row in iterator: + i += 1 + if i == 2: + break + with pytest.raises( + ValueError, match="Unexpected buffered data at end of executeQuery reqest" + ): + CrossSync._Sync_Impl.next(iterator) + + def test_iterator_handles_reset(self): + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + stream = [ + *chunked_responses(2, int_val(1), int_val(2)), + *chunked_responses(3, int_val(3), int_val(4), reset=True), + *chunked_responses(2, int_val(5), int_val(6), reset=False, token=b"token1"), + chunked_responses(2, int_val(10), int_val(12))[0], + *chunked_responses(2, int_val(7), int_val(8), token=b"token2"), + ] + mock_async_iterator = MockIterator(stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + results = [] + for value in iterator: + results.append(value) + assert len(results) == 3 + [row1, row2, row3] = results + assert row1["test1"] == 3 + assert row1["test2"] == 4 + assert row2["test1"] == 5 + assert row2["test2"] == 6 + assert row3["test1"] == 7 + assert row3["test2"] == 8 + + def test_iterator_returns_error_if_metadata_requested_too_early( + self, proto_byte_stream + ): + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + with pytest.raises(exceptions.EarlyMetadataCallError): + iterator.metadata + + def test_iterator_closes_on_full_consumption(self, proto_byte_stream): + """Tests that the iterator's close() method is called after all results + have been successfully consumed.""" + client_mock = mock.Mock() + client_mock._register_instance = 
CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + client_mock._executor = concurrent.futures.ThreadPoolExecutor() + mock_async_iterator = MockIterator(proto_byte_stream) + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + results = [row for row in iterator] + assert len(results) == 3 + client_mock._remove_instance_registration.assert_called_once() + assert iterator.is_closed + + def test_iterator_closes_on_early_break(self, proto_byte_stream): + """Tests that the iterator's close() method is called if the user breaks + out of the iteration loop early.""" + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = CrossSync._Sync_Impl.ExecuteQueryIterator( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + for _ in iterator: + break + del iterator + CrossSync._Sync_Impl.sleep(1) + gc.collect() + CrossSync._Sync_Impl.sleep(1) + client_mock._remove_instance_registration.assert_called_once() + + def test_iterator_closes_on_error(self, proto_byte_stream): + """Tests that the iterator's close() method is called if an exception + is raised during iteration.""" + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + + class MockErrorIterator(MockIterator): + def __next__(self): + if self.idx >= 1: + raise ValueError("Injected-test-error") + return super().__next__() + + mock_async_iterator = MockErrorIterator(proto_byte_stream) + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + with pytest.raises(ValueError, match="Injected-test-error"): + for _ in iterator: + pass + client_mock._remove_instance_registration.assert_called_once() diff --git a/tests/unit/data/execute_query/sql_helpers.py b/tests/unit/data/execute_query/sql_helpers.py new file mode 100644 index 000000000..119bb2d50 --- /dev/null +++ b/tests/unit/data/execute_query/sql_helpers.py @@ -0,0 +1,224 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime, timedelta
+from typing import List
+
+from google.protobuf import timestamp_pb2
+
+from google.cloud.bigtable_v2.types.bigtable import (
+    ExecuteQueryResponse,
+    PrepareQueryResponse,
+)
+from google.cloud.bigtable_v2.types.data import (
+    Value,
+    ProtoRows,
+    ProtoRowsBatch,
+    ResultSetMetadata,
+    ColumnMetadata,
+)
+from google.cloud.bigtable_v2.types.types import Type
+import google_crc32c  # type: ignore
+
+
+def checksum(data: bytearray) -> int:
+    return google_crc32c.value(bytes(memoryview(data)))
+
+
+def split_bytes_into_chunks(bytes_to_split, num_chunks) -> List[bytes]:
+    from google.cloud.bigtable.helpers import batched
+
+    assert num_chunks <= len(bytes_to_split)
+    bytes_per_part = (len(bytes_to_split) - 1) // num_chunks + 1
+    result = list(map(bytes, batched(bytes_to_split, bytes_per_part)))
+    assert len(result) == num_chunks
+    return result
+
+
+def column(name: str, type: Type) -> ColumnMetadata:
+    c = ColumnMetadata()
+    c.name = name
+    c.type_ = type
+    return c
+
+
+def metadata(*args: ColumnMetadata) -> ResultSetMetadata:
+    metadata = ResultSetMetadata()
+    metadata.proto_schema.columns = args
+    return metadata
+
+
+def prepare_response(
+    prepared_query: bytes,
+    metadata: ResultSetMetadata,
+    valid_until=datetime.now() + timedelta(seconds=10),
+) -> PrepareQueryResponse:
+    res = PrepareQueryResponse()
+    res.prepared_query = prepared_query
+    res.metadata = metadata
+    ts = timestamp_pb2.Timestamp()
+    ts.FromDatetime(valid_until)
+    res.valid_until = ts
+    return res
+
+
+def batch_response(
+    b: bytes, reset=False, token=None, checksum=None
+) -> ExecuteQueryResponse:
+    res = ExecuteQueryResponse()
+    res.results.proto_rows_batch.batch_data = b
+    res.results.reset = reset
+    res.results.resume_token = token
+    if checksum:
+        res.results.batch_checksum = checksum
+    return res
+
+
+def execute_query_response(
+    *args: Value, reset=False, token=None, checksum=None
+) -> ExecuteQueryResponse:
+    # serialize the values directly to bytes; batch_response expects raw bytes
+    data = proto_rows_bytes(*args)
+    return batch_response(data, reset, token, checksum=checksum)
+
+
+def chunked_responses(
+    num_chunks: int,
+    *args: Value,
+    reset=True,
+    token=None,
+) -> List[ExecuteQueryResponse]:
+    """
+    Creates one ExecuteQuery response per chunk, with the data in args split between chunks.
+ """ + data_bytes = proto_rows_bytes(*args) + chunks = split_bytes_into_chunks(data_bytes, num_chunks) + responses = [] + for i, chunk in enumerate(chunks): + response = ExecuteQueryResponse() + if i == 0: + response.results.reset = reset + if i == len(chunks) - 1: + response.results.resume_token = token + response.results.batch_checksum = checksum(data_bytes) + response.results.proto_rows_batch.batch_data = chunk + responses.append(response) + return responses + + +def proto_rows_bytes(*args: Value) -> bytes: + rows = ProtoRows() + rows.values = args + return ProtoRows.serialize(rows) + + +def token_only_response(token: bytes) -> ExecuteQueryResponse: + r = ExecuteQueryResponse() + r.results.resume_token = token + return r + + +def proto_rows_batch(*args: Value) -> ProtoRowsBatch: + batch = ProtoRowsBatch() + batch.batch_data = proto_rows_bytes(args) + return batch + + +def str_val(s: str) -> Value: + v = Value() + v.string_value = s + return v + + +def bytes_val(b: bytes) -> Value: + v = Value() + v.bytes_value = b + return v + + +def int_val(i: int) -> Value: + v = Value() + v.int_value = i + return v + + +def null_val() -> Value: + return Value() + + +def str_type() -> Type: + t = Type() + t.string_type = {} + return t + + +def bytes_type() -> Type: + t = Type() + t.bytes_type = {} + return t + + +def int64_type() -> Type: + t = Type() + t.int64_type = {} + return t + + +def float64_type() -> Type: + t = Type() + t.float64_type = {} + return t + + +def float32_type() -> Type: + t = Type() + t.float32_type = {} + return t + + +def bool_type() -> Type: + t = Type() + t.bool_type = {} + return t + + +def ts_type() -> Type: + t = Type() + t.timestamp_type = {} + return t + + +def date_type() -> Type: + t = Type() + t.date_type = {} + return t + + +def proto_type() -> Type: + t = Type() + t.proto_type = {} + return t + + +def enum_type() -> Type: + t = Type() + t.enum_type = {} + return t + + +def array_type(elem_type: Type) -> Type: + t = Type() + arr_type = Type.Array() + arr_type.element_type = elem_type + t.array_type = arr_type + return t diff --git a/tests/unit/data/execute_query/test_byte_cursor.py b/tests/unit/data/execute_query/test_byte_cursor.py new file mode 100644 index 000000000..fc764c86c --- /dev/null +++ b/tests/unit/data/execute_query/test_byte_cursor.py @@ -0,0 +1,171 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import pytest + +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor + +from .sql_helpers import ( + batch_response, + checksum, + token_only_response, +) + + +def pass_values_to_byte_cursor(byte_cursor, iterable): + for value in iterable: + result = byte_cursor.consume(value) + if result is not None: + yield result + + +class TestByteCursor: + def test__proto_rows_batch__complete_data(self): + byte_cursor = _ByteCursor() + stream = [ + batch_response(b"123"), + batch_response(b"456"), + batch_response(b"789"), + batch_response(b"0", token=b"token1", checksum=checksum(b"1234567890")), + batch_response(b"abc"), + batch_response(b"def"), + batch_response(b"ghi"), + batch_response(b"j", token=b"token2", checksum=checksum(b"abcdefghij")), + ] + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value[0] == b"1234567890" + assert byte_cursor._resume_token == b"token1" + + value = next(byte_cursor_iter) + assert value[0] == b"abcdefghij" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__empty_proto_rows_batch(self): + byte_cursor = _ByteCursor() + stream = [ + batch_response(b"", token=b"token1"), + batch_response(b"123"), + batch_response(b"0", token=b"token2", checksum=checksum(b"1230")), + ] + + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value[0] == b"1230" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__handles_response_with_just_a_token(self): + byte_cursor = _ByteCursor() + stream = [ + token_only_response(b"token1"), + batch_response(b"123"), + batch_response(b"0", token=b"token2", checksum=checksum(b"1230")), + ] + + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value[0] == b"1230" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__no_resume_token_at_the_end_of_stream(self): + byte_cursor = _ByteCursor() + stream = [ + batch_response(b"0", token=b"token1", checksum=checksum(b"0")), + batch_response(b"abc"), + batch_response(b"def"), + batch_response(b"ghi"), + batch_response(b"j", checksum=checksum(b"abcdefghij")), + ] + value = byte_cursor.consume(stream[0]) + assert value[0] == b"0" + assert byte_cursor._resume_token == b"token1" + + assert byte_cursor.consume(stream[1]) is None + assert byte_cursor.consume(stream[2]) is None + assert byte_cursor.consume(stream[3]) is None + assert byte_cursor.consume(stream[4]) is None + # Empty should be checked by the iterator and should throw an error if this happens + assert not byte_cursor.empty() + + def test__proto_rows_batch__prepare_for_new_request_resets_buffer(self): + byte_cursor = _ByteCursor() + assert byte_cursor.consume(batch_response(b"abc")) is None + assert ( + byte_cursor.consume( + batch_response(b"def", token=b"token1", checksum=checksum(b"abcdef")) + )[0] + == b"abcdef" + ) + assert byte_cursor.consume(batch_response(b"foo")) is None + assert byte_cursor.prepare_for_new_request() == b"token1" + # foo is dropped because of new request + assert ( + byte_cursor.consume( + batch_response(b"bar", token=b"token2", checksum=checksum(b"bar")) + )[0] + == b"bar" + ) + + def test__proto_rows_batch__multiple_batches_before_token(self): + byte_cursor = _ByteCursor() + assert byte_cursor.consume(batch_response(b"foo")) is None + assert ( + byte_cursor.consume(batch_response(b"bar", checksum=checksum(b"foobar"))) + is None + ) + assert 
byte_cursor.consume(batch_response(b"1")) is None + assert byte_cursor.consume(batch_response(b"2")) is None + assert ( + byte_cursor.consume(batch_response(b"3", checksum=checksum(b"123"))) is None + ) + batches = byte_cursor.consume( + batch_response(b"done", token=b"token", checksum=checksum(b"done")) + ) + assert len(batches) == 3 + assert batches[0] == b"foobar" + assert batches[1] == b"123" + assert batches[2] == b"done" + + def test__proto_rows_batch__reset_on_partial_batch(self): + byte_cursor = _ByteCursor() + assert byte_cursor.consume(batch_response(b"foo")) is None + assert byte_cursor.consume(batch_response(b"bar", reset=True)) is None + batches = byte_cursor.consume( + batch_response(b"baz", token=b"token", checksum=checksum(b"barbaz")) + ) + assert len(batches) == 1 + assert batches[0] == b"barbaz" + + def test__proto_rows_batch__reset_on_complete_batch(self): + byte_cursor = _ByteCursor() + assert byte_cursor.consume(batch_response(b"foo")) is None + assert ( + byte_cursor.consume(batch_response(b"bar", checksum=checksum(b"foobar"))) + is None + ) + assert byte_cursor.consume(batch_response(b"discard")) is None + assert byte_cursor.consume(batch_response(b"1", reset=True)) is None + assert byte_cursor.consume(batch_response(b"2")) is None + batches = byte_cursor.consume( + batch_response(b"3", token=b"token", checksum=checksum(b"123")) + ) + assert len(batches) == 1 + assert batches[0] == b"123" + + def test__proto_rows_batch__checksum_mismatch(self): + byte_cursor = _ByteCursor() + with pytest.raises( + ValueError, + match="Unexpected checksum mismatch.", + ): + byte_cursor.consume(batch_response(b"foo", checksum=1234)) diff --git a/tests/unit/data/execute_query/test_checksum.py b/tests/unit/data/execute_query/test_checksum.py new file mode 100644 index 000000000..2a391882d --- /dev/null +++ b/tests/unit/data/execute_query/test_checksum.py @@ -0,0 +1,59 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
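+
+# Context for the tests below, as a hedged sketch (the helper name is
+# illustrative, not the production API): google-crc32c falls back to a pure
+# python implementation when its C extension is unavailable, and the _CRC32C
+# wrapper is expected to emit a performance warning only once, on the first
+# checksum call.
+def _example_warn_once_checksum(data: bytes) -> int:
+    import warnings
+
+    import google_crc32c
+
+    if not getattr(_example_warn_once_checksum, "_warned", False):
+        # google_crc32c.implementation is "c" when the extension is loaded
+        if google_crc32c.implementation != "c":
+            warnings.warn("falling back to pure python crc32c", RuntimeWarning)
+        _example_warn_once_checksum._warned = True
+    return google_crc32c.value(data)
+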
+import pytest
+
+import sys
+from unittest import mock
+import warnings
+
+with warnings.catch_warnings(record=True) as suppressed_warning:
+    warnings.warn("Suppressed warning", RuntimeWarning)
+
+
+def test_import_warning_is_rewritten():
+    with mock.patch(
+        "google.cloud.bigtable.data.execute_query._checksum.import_warning",
+        suppressed_warning,
+    ):
+        with warnings.catch_warnings(record=True) as import_warning:
+            from google.cloud.bigtable.data.execute_query._checksum import _CRC32C
+
+            # reset this in case the warning has been emitted in other tests
+            _CRC32C.warn_emitted = False
+
+            assert import_warning == []
+        with warnings.catch_warnings(record=True) as first_call_warning:
+            assert _CRC32C.checksum(b"test") == 2258662080
+            assert (
+                "Using pure python implementation of `google-crc32c` for ExecuteQuery response validation"
+                in str(first_call_warning[0])
+            )
+        with warnings.catch_warnings(record=True) as second_call_warning:
+            assert _CRC32C.checksum(b"test") == 2258662080
+            assert second_call_warning == []
+
+
+@pytest.mark.skipif(
+    sys.version_info < (3, 9) or sys.version_info > (3, 12),
+    reason="google_crc32c currently uses pure python for versions not between 3.9 & 3.12",
+)
+def test_no_warning():
+    with warnings.catch_warnings(record=True) as first_call_warning:
+        from google.cloud.bigtable.data.execute_query._checksum import _CRC32C
+
+        # reset this in case the warning has been emitted in other tests
+        _CRC32C.warn_emitted = False
+
+        assert _CRC32C.checksum(b"test") == 2258662080
+        assert first_call_warning == []
diff --git a/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py b/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py
new file mode 100644
index 000000000..0a1be1423
--- /dev/null
+++ b/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py
@@ -0,0 +1,326 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
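+
+# A short usage sketch of the formatter under test (the expected shapes
+# mirror the parametrized cases below; the helper name is illustrative):
+def _example_parameter_formatting():
+    from google.cloud.bigtable.data.execute_query._parameters_formatting import (
+        _format_execute_query_params,
+    )
+    from google.cloud.bigtable.data.execute_query.metadata import SqlType
+
+    # int parameters are type-inferred
+    inferred = _format_execute_query_params({"n": 1}, None)
+    assert inferred["n"]["int_value"] == 1
+    # floats are ambiguous between FLOAT32 and FLOAT64, so they require an
+    # explicit SqlType
+    explicit = _format_execute_query_params({"x": 1.5}, {"x": SqlType.Float64()})
+    assert explicit["x"] == {"type_": {"float64_type": {}}, "float_value": 1.5}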
+ +import datetime + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.type import date_pb2 +import pytest + +from google.cloud.bigtable.data.execute_query._parameters_formatting import ( + _format_execute_query_params, + _to_param_types, +) +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query.values import Struct +from google.protobuf import timestamp_pb2 +from samples.testdata import singer_pb2 + +timestamp = int( + datetime.datetime(2024, 5, 12, 17, 44, 12, tzinfo=datetime.timezone.utc).timestamp() +) +dt_micros_non_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, 123, nanosecond=0, tzinfo=datetime.timezone.utc +).timestamp_pb() +dt_nanos_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=0, tzinfo=datetime.timezone.utc +).timestamp_pb() +dt_nanos_non_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=12, tzinfo=datetime.timezone.utc +).timestamp_pb() +pb_date = date_pb2.Date(year=2024, month=5, day=15) + + +@pytest.mark.parametrize( + "input_value,value_field,type_field,expected_value", + [ + (1, "int_value", "int64_type", 1), + ("2", "string_value", "string_type", "2"), + (b"3", "bytes_value", "bytes_type", b"3"), + (True, "bool_value", "bool_type", True), + ( + datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc), + "timestamp_value", + "timestamp_type", + dt_nanos_zero, + ), + ( + datetime.datetime( + 2024, 5, 12, 17, 44, 12, 123, tzinfo=datetime.timezone.utc + ), + "timestamp_value", + "timestamp_type", + dt_micros_non_zero, + ), + (datetime.date(2024, 5, 15), "date_value", "date_type", pb_date), + ( + DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=12, tzinfo=datetime.timezone.utc + ), + "timestamp_value", + "timestamp_type", + dt_nanos_non_zero, + ), + ], +) +def test_execute_query_parameters_inferred_types_parsing( + input_value, value_field, type_field, expected_value +): + result = _format_execute_query_params( + { + "test": input_value, + }, + None, + ) + assert result["test"][value_field] == expected_value + assert type_field in result["test"]["type_"] + + +@pytest.mark.parametrize( + "value, sql_type, proto_result", + [ + (1.3, SqlType.Float32(), {"type_": {"float32_type": {}}, "float_value": 1.3}), + (1.3, SqlType.Float64(), {"type_": {"float64_type": {}}, "float_value": 1.3}), + ( + [1, 2, 3, 4], + SqlType.Array(SqlType.Int64()), + { + "type_": {"array_type": {"element_type": {"int64_type": {}}}}, + "array_value": { + "values": [ + {"int_value": 1}, + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + ] + }, + }, + ), + ( + [1, None, 2, None], + SqlType.Array(SqlType.Int64()), + { + "type_": {"array_type": {"element_type": {"int64_type": {}}}}, + "array_value": { + "values": [ + {"int_value": 1}, + {}, + {"int_value": 2}, + {}, + ] + }, + }, + ), + ( + None, + SqlType.Array(SqlType.Int64()), + { + "type_": {"array_type": {"element_type": {"int64_type": {}}}}, + }, + ), + ( + ["foo", "bar", None], + SqlType.Array(SqlType.String()), + { + "type_": {"array_type": {"element_type": {"string_type": {}}}}, + "array_value": { + "values": [ + {"string_value": "foo"}, + {"string_value": "bar"}, + {}, + ] + }, + }, + ), + ( + [b"foo", b"bar", None], + SqlType.Array(SqlType.Bytes()), + { + "type_": {"array_type": {"element_type": {"bytes_type": {}}}}, + "array_value": { + "values": [ + {"bytes_value": b"foo"}, + {"bytes_value": b"bar"}, + {}, + ] + }, + }, + ), + ( + [ + 
datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + datetime.datetime.fromtimestamp(2000, tz=datetime.timezone.utc), + None, + ], + SqlType.Array(SqlType.Timestamp()), + { + "type_": {"array_type": {"element_type": {"timestamp_type": {}}}}, + "array_value": { + "values": [ + {"timestamp_value": timestamp_pb2.Timestamp(seconds=1000)}, + {"timestamp_value": timestamp_pb2.Timestamp(seconds=2000)}, + {}, + ], + }, + }, + ), + ( + [True, False, None], + SqlType.Array(SqlType.Bool()), + { + "type_": {"array_type": {"element_type": {"bool_type": {}}}}, + "array_value": { + "values": [ + {"bool_value": True}, + {"bool_value": False}, + {}, + ], + }, + }, + ), + ( + [datetime.date(2025, 1, 16), datetime.date(2025, 1, 17), None], + SqlType.Array(SqlType.Date()), + { + "type_": {"array_type": {"element_type": {"date_type": {}}}}, + "array_value": { + "values": [ + {"date_value": date_pb2.Date(year=2025, month=1, day=16)}, + {"date_value": date_pb2.Date(year=2025, month=1, day=17)}, + {}, + ], + }, + }, + ), + ( + [1.1, 1.2, None], + SqlType.Array(SqlType.Float32()), + { + "type_": {"array_type": {"element_type": {"float32_type": {}}}}, + "array_value": { + "values": [ + {"float_value": 1.1}, + {"float_value": 1.2}, + {}, + ] + }, + }, + ), + ( + [1.1, 1.2, None], + SqlType.Array(SqlType.Float64()), + { + "type_": {"array_type": {"element_type": {"float64_type": {}}}}, + "array_value": { + "values": [ + {"float_value": 1.1}, + {"float_value": 1.2}, + {}, + ] + }, + }, + ), + ], +) +def test_execute_query_explicit_parameter_parsing(value, sql_type, proto_result): + result = _format_execute_query_params( + {"param_name": value}, {"param_name": sql_type} + ) + print(result) + assert result["param_name"] == proto_result + + +def test_execute_query_parameters_not_supported_types(): + with pytest.raises(ValueError): + _format_execute_query_params({"test1": 1.1}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": {"a": 1}}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": [1]}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": Struct([("field1", 1)])}, None) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": {"a": 1}}, + { + "test1": SqlType.Map(SqlType.String(), SqlType.Int64()), + }, + ) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": Struct([("field1", 1)])}, + {"test1": SqlType.Struct([("field1", SqlType.Int64())])}, + ) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": singer_pb2.Singer()}, + {"test1": SqlType.Proto()}, + ) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": singer_pb2.Genre.ROCK}, + {"test1": SqlType.Enum()}, + ) + + +def test_instance_execute_query_parameters_not_match(): + with pytest.raises(ValueError, match="test2"): + _format_execute_query_params( + { + "test1": 1, + "test2": 1, + }, + { + "test1": SqlType.Int64(), + "test2": SqlType.String(), + }, + ) + + +def test_array_params_enforce_element_type(): + with pytest.raises(ValueError, match="Error when parsing parameter p") as e1: + _format_execute_query_params( + {"p": ["a", 1, None]}, {"p": SqlType.Array(SqlType.String())} + ) + with pytest.raises(ValueError, match="Error when parsing parameter p") as e2: + _format_execute_query_params( + {"p": ["a", 1, None]}, {"p": 
SqlType.Array(SqlType.Int64())} + ) + assert "Expected query parameter of type str, got int" in str(e1.value.__cause__) + assert "Expected query parameter of type int, got str" in str(e2.value.__cause__) + + +def test_to_params_types(): + results = _to_param_types( + {"a": 1, "s": "str", "b": b"bytes", "array": ["foo", "bar"]}, + {"array": SqlType.Array(SqlType.String())}, + ) + assert results == { + "a": SqlType.Int64()._to_type_pb_dict(), + "s": SqlType.String()._to_type_pb_dict(), + "b": SqlType.Bytes()._to_type_pb_dict(), + "array": SqlType.Array(SqlType.String())._to_type_pb_dict(), + } + + +def test_to_param_types_empty(): + results = _to_param_types({}, {}) + assert results == {} diff --git a/tests/unit/data/execute_query/test_metadata.py b/tests/unit/data/execute_query/test_metadata.py new file mode 100644 index 000000000..c90529d6f --- /dev/null +++ b/tests/unit/data/execute_query/test_metadata.py @@ -0,0 +1,25 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest + +from google.cloud.bigtable.data.execute_query.metadata import ( + _pb_metadata_to_metadata_types, +) +from google.cloud.bigtable_v2.types.data import ResultSetMetadata + + +def test_empty_metadata_fails_parsing(): + invalid_md_proto = ResultSetMetadata({"proto_schema": {"columns": []}}) + with pytest.raises(ValueError): + _pb_metadata_to_metadata_types(invalid_md_proto) diff --git a/tests/unit/data/execute_query/test_query_result_parsing_utils.py b/tests/unit/data/execute_query/test_query_result_parsing_utils.py new file mode 100644 index 000000000..ea03dfe9a --- /dev/null +++ b/tests/unit/data/execute_query/test_query_result_parsing_utils.py @@ -0,0 +1,1695 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
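+
+# A short usage sketch of the parsing pipeline under test (the helper name is
+# illustrative): a wire PBType is first converted to a metadata SqlType, which
+# then drives decoding of the matching PBValue.
+def _example_parse_roundtrip():
+    from google.cloud.bigtable_v2 import Type as PBType, Value as PBValue
+    from google.cloud.bigtable.data.execute_query.metadata import (
+        _pb_type_to_metadata_type,
+    )
+    from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import (
+        _parse_pb_value_to_python_value,
+    )
+
+    metadata_type = _pb_type_to_metadata_type(PBType({"int64_type": {}}))
+    value = PBValue({"int_value": 7})
+    assert _parse_pb_value_to_python_value(value._pb, metadata_type, "col") == 7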
+ +import pytest +from google.cloud.bigtable.data.execute_query.values import Struct +from google.cloud.bigtable_v2 import Type as PBType, Value as PBValue +from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import ( + _parse_pb_value_to_python_value, +) +from google.cloud.bigtable.data.execute_query.metadata import ( + _pb_type_to_metadata_type, + SqlType, +) + +from google.type import date_pb2 +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +import datetime + +from tests.unit.data.execute_query.sql_helpers import int64_type, proto_type, enum_type +from samples.testdata import singer_pb2 + +TYPE_BYTES = {"bytes_type": {}} +TYPE_TIMESTAMP = {"timestamp_type": {}} + + +class TestQueryResultParsingUtils: + @pytest.mark.parametrize( + "type_dict,value_dict,expected_metadata_type,expected_value", + [ + (int64_type(), {"int_value": 1}, SqlType.Int64, 1), + ( + {"string_type": {}}, + {"string_value": "test"}, + SqlType.String, + "test", + ), + ({"bool_type": {}}, {"bool_value": False}, SqlType.Bool, False), + ( + {"bytes_type": {}}, + {"bytes_value": b"test"}, + SqlType.Bytes, + b"test", + ), + ( + {"float64_type": {}}, + {"float_value": 17.21}, + SqlType.Float64, + 17.21, + ), + ( + {"timestamp_type": {}}, + {"timestamp_value": {"seconds": 1715864647, "nanos": 12}}, + SqlType.Timestamp, + DatetimeWithNanoseconds( + 2024, 5, 16, 13, 4, 7, nanosecond=12, tzinfo=datetime.timezone.utc + ), + ), + ( + {"date_type": {}}, + {"date_value": {"year": 1800, "month": 12, "day": 0}}, + SqlType.Date, + date_pb2.Date(year=1800, month=12, day=0), + ), + ], + ) + def test_basic_types( + self, type_dict, value_dict, expected_metadata_type, expected_value + ): + _type = PBType(type_dict) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is expected_metadata_type + value = PBValue(value_dict) + assert ( + _parse_pb_value_to_python_value(value._pb, metadata_type, "my_field") + == expected_value + ) + + def test__proto(self): + _type = PBType({"proto_type": {}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Proto + + singer = singer_pb2.Singer(name="John") + value = PBValue({"bytes_value": singer.SerializeToString()}) + + # without proto definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "proto_field" + ) + assert result == singer.SerializeToString() + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + None, + {"proto_field": singer_pb2.Singer()}, + ) + assert result == singer.SerializeToString() + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "proto_field", + {"proto_field": singer_pb2.Singer()}, + ) + assert result == singer + + def test__enum(self): + _type = PBType({"enum_type": {}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Enum + + value = PBValue({"int_value": 1}) + + # without enum definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "enum_field") + assert result == 1 + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, None, {"enum_field": singer_pb2.Genre} + ) + assert result == 1 + + # with enum definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "enum_field", {"enum_field": singer_pb2.Genre} + ) + assert result == "JAZZ" + + # Larger test cases were extracted for readability + def test__array(self): + _type = PBType({"array_type": {"element_type": 
int64_type()}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Int64 + value = PBValue( + { + "array_value": { + "values": [ + {"int_value": 1}, + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + ] + } + } + ) + assert _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) == [1, 2, 3, 4] + + def test__array_of_protos(self): + _type = PBType({"array_type": {"element_type": proto_type()}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Proto + + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + value = PBValue( + { + "array_value": { + "values": [ + {"bytes_value": singer1.SerializeToString()}, + {"bytes_value": singer2.SerializeToString()}, + ] + } + } + ) + + # without proto definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) + assert result == [singer1.SerializeToString(), singer2.SerializeToString()] + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, None, {"array_field": singer_pb2.Singer()} + ) + assert result == [singer1.SerializeToString(), singer2.SerializeToString()] + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "array_field", + {"array_field": singer_pb2.Singer()}, + ) + assert result == [singer1, singer2] + + def test__array_of_enums(self): + _type = PBType({"array_type": {"element_type": enum_type()}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Enum + + value = PBValue( + { + "array_value": { + "values": [ + {"int_value": 0}, # POP + {"int_value": 1}, # JAZZ + ] + } + } + ) + + # without enum definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) + assert result == [0, 1] + + # with enum definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "array_field", + {"array_field": singer_pb2.Genre}, + ) + assert result == ["POP", "JAZZ"] + + def test__struct(self): + _type = PBType( + { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": int64_type(), + }, + { + "field_name": None, + "type_": {"string_type": {}}, + }, + { + "field_name": "field3", + "type_": {"array_type": {"element_type": int64_type()}}, + }, + { + "field_name": "field3", + "type_": {"string_type": {}}, + }, + ] + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test2"}, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + {"int_value": 5}, + ] + } + }, + {"string_value": "test4"}, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Struct + assert type(metadata_type["field1"]) is SqlType.Int64 + assert type(metadata_type[1]) is SqlType.String + assert type(metadata_type[2]) is SqlType.Array + assert type(metadata_type[2].element_type) is SqlType.Int64 + assert type(metadata_type[3]) is SqlType.String + + # duplicate fields not accesible by name + with pytest.raises(KeyError, match="Ambigious field name"): + metadata_type["field3"] + + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "struct_field" + ) + assert isinstance(result, Struct) 
+ assert result["field1"] == result[0] == 1 + assert result[1] == "test2" + + # duplicate fields not accesible by name + with pytest.raises(KeyError, match="Ambigious field name"): + result["field3"] + + # duplicate fields accessible by index + assert result[2] == [2, 3, 4, 5] + assert result[3] == "test4" + + def test__struct_with_proto_and_enum(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + _type = PBType( + { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": proto_type(), + }, + { + "field_name": None, + "type_": proto_type(), + }, + { + "field_name": "field2", + "type_": enum_type(), + }, + { + "field_name": None, + "type_": enum_type(), + }, + ] + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + {"bytes_value": singer1.SerializeToString()}, + {"bytes_value": singer2.SerializeToString()}, + {"int_value": 0}, + {"int_value": 1}, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Struct + assert type(metadata_type["field1"]) is SqlType.Proto + assert type(metadata_type["field2"]) is SqlType.Enum + assert type(metadata_type[0]) is SqlType.Proto + assert type(metadata_type[1]) is SqlType.Proto + assert type(metadata_type[2]) is SqlType.Enum + assert type(metadata_type[3]) is SqlType.Enum + + # without proto definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "struct_field" + ) + assert isinstance(result, Struct) + assert result["field1"] == singer1.SerializeToString() + assert result["field2"] == 0 + assert result[0] == singer1.SerializeToString() + assert result[1] == singer2.SerializeToString() + assert result[2] == 0 + assert result[3] == 1 + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "struct_field", + { + "struct_field.field1": singer_pb2.Singer(), + "struct_field.field2": singer_pb2.Genre, + }, + ) + assert isinstance(result, Struct) + assert result["field1"] == singer1 + assert result["field2"] == "POP" + assert result[0] == singer1 + # unnamed proto fields won't get parsed + assert result[1] == singer2.SerializeToString() + assert result[2] == "POP" + # unnamed enum fields won't get parsed + assert result[3] == 1 + + def test__array_of_structs(self): + _type = PBType( + { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": int64_type(), + }, + { + "field_name": None, + "type_": {"string_type": {}}, + }, + { + "field_name": "field3", + "type_": {"bool_type": {}}, + }, + ] + } + } + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test1"}, + {"bool_value": True}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"string_value": "test2"}, + {"bool_value": False}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 3}, + {"string_value": "test3"}, + {"bool_value": True}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 4}, + {"string_value": "test4"}, + {"bool_value": False}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Struct + assert type(metadata_type.element_type["field1"]) is SqlType.Int64 + assert type(metadata_type.element_type[1]) is SqlType.String + assert type(metadata_type.element_type["field3"]) is 
SqlType.Bool + + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) + assert isinstance(result, list) + assert len(result) == 4 + + assert isinstance(result[0], Struct) + assert result[0]["field1"] == 1 + assert result[0][1] == "test1" + assert result[0]["field3"] + + assert isinstance(result[1], Struct) + assert result[1]["field1"] == 2 + assert result[1][1] == "test2" + assert not result[1]["field3"] + + assert isinstance(result[2], Struct) + assert result[2]["field1"] == 3 + assert result[2][1] == "test3" + assert result[2]["field3"] + + assert isinstance(result[3], Struct) + assert result[3]["field1"] == 4 + assert result[3][1] == "test4" + assert not result[3]["field3"] + + def test__array_of_structs_with_proto_and_enum(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + _type = PBType( + { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "proto_field", + "type_": proto_type(), + }, + { + "field_name": "enum_field", + "type_": enum_type(), + }, + { + "field_name": None, + "type_": proto_type(), + }, + ] + } + } + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"bytes_value": singer1.SerializeToString()}, + {"int_value": 0}, # POP + {"bytes_value": singer1.SerializeToString()}, + ] + } + }, + { + "array_value": { + "values": [ + {"bytes_value": singer2.SerializeToString()}, + {"int_value": 1}, # JAZZ + {"bytes_value": singer2.SerializeToString()}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Struct + assert type(metadata_type.element_type["proto_field"]) is SqlType.Proto + assert type(metadata_type.element_type["enum_field"]) is SqlType.Enum + assert type(metadata_type.element_type[2]) is SqlType.Proto + + # without proto definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) + assert isinstance(result, list) + assert len(result) == 2 + assert isinstance(result[0], Struct) + assert result[0]["proto_field"] == singer1.SerializeToString() + assert result[0]["enum_field"] == 0 + assert result[0][2] == singer1.SerializeToString() + assert isinstance(result[1], Struct) + assert result[1]["proto_field"] == singer2.SerializeToString() + assert result[1]["enum_field"] == 1 + assert result[1][2] == singer2.SerializeToString() + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "array_field", + { + "array_field.proto_field": singer_pb2.Singer(), + "array_field.enum_field": singer_pb2.Genre, + "array_field": singer_pb2.Singer(), # unused + }, + ) + assert isinstance(result, list) + assert len(result) == 2 + assert isinstance(result[0], Struct) + assert result[0]["proto_field"] == singer1 + assert result[0]["enum_field"] == "POP" + # unnamed proto fields won't get parsed + assert result[0][2] == singer1.SerializeToString() + assert isinstance(result[1], Struct) + assert result[1]["proto_field"] == singer2 + assert result[1]["enum_field"] == "JAZZ" + # unnamed proto fields won't get parsed + assert result[1][2] == singer2.SerializeToString() + + def test__map(self): + _type = PBType( + { + "map_type": { + "key_type": int64_type(), + "value_type": {"string_type": {}}, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, 
+ {"string_value": "test1"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"string_value": "test2"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 3}, + {"string_value": "test3"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 4}, + {"string_value": "test4"}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.String + + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert isinstance(result, dict) + assert len(result) == 4 + + assert result == { + 1: "test1", + 2: "test2", + 3: "test3", + 4: "test4", + } + + def test__map_repeated_values(self): + _type = PBType( + { + "map_type": { + "key_type": int64_type(), + "value_type": {"string_type": {}}, + } + }, + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test1"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test2"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test3"}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert len(result) == 1 + + assert result == { + 1: "test3", + } + + def test__map_with_protos(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + _type = PBType( + { + "map_type": { + "key_type": int64_type(), + "value_type": proto_type(), + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"bytes_value": singer1.SerializeToString()}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"bytes_value": singer2.SerializeToString()}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Proto + + # without proto definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert isinstance(result, dict) + assert len(result) == 2 + assert result[1] == singer1.SerializeToString() + assert result[2] == singer2.SerializeToString() + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value": singer_pb2.Singer(), + }, + ) + assert isinstance(result, dict) + assert len(result) == 2 + assert result[1] == singer1 + assert result[2] == singer2 + + def test__map_with_enums(self): + _type = PBType( + { + "map_type": { + "key_type": int64_type(), + "value_type": enum_type(), + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"int_value": 0}, # POP + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"int_value": 1}, # JAZZ + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Enum + + # without enum definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, 
"map_field") + assert isinstance(result, dict) + assert len(result) == 2 + assert result[1] == 0 + assert result[2] == 1 + + # with enum definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value": singer_pb2.Genre, + }, + ) + assert isinstance(result, dict) + assert len(result) == 2 + assert result[1] == "POP" + assert result[2] == "JAZZ" + + def test__map_of_maps_of_structs(self): + _type = PBType( + { + "map_type": { + "key_type": int64_type(), + "value_type": { + "map_type": { + "key_type": {"string_type": {}}, + "value_type": { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": int64_type(), + }, + { + "field_name": "field2", + "type_": {"string_type": {}}, + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (int, map) tuples + { + "array_value": { + "values": [ # (int, map) tuple + {"int_value": 1}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "1_1"}, + { + "array_value": { + "values": [ + { + "int_value": 1 + }, + { + "string_value": "test1" + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "1_2"}, + { + "array_value": { + "values": [ + { + "int_value": 2 + }, + { + "string_value": "test2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (int, map) tuple + {"int_value": 2}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "2_1"}, + { + "array_value": { + "values": [ + { + "int_value": 3 + }, + { + "string_value": "test3" + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "2_2"}, + { + "array_value": { + "values": [ + { + "int_value": 4 + }, + { + "string_value": "test4" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Map + assert type(metadata_type.value_type.key_type) is SqlType.String + assert type(metadata_type.value_type.value_type) is SqlType.Struct + assert type(metadata_type.value_type.value_type["field1"]) is SqlType.Int64 + assert type(metadata_type.value_type.value_type["field2"]) is SqlType.String + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + + assert result[1]["1_1"]["field1"] == 1 + assert result[1]["1_1"]["field2"] == "test1" + + assert result[1]["1_2"]["field1"] == 2 + assert result[1]["1_2"]["field2"] == "test2" + + assert result[2]["2_1"]["field1"] == 3 + assert result[2]["2_1"]["field2"] == "test3" + + assert result[2]["2_2"]["field1"] == 4 + assert result[2]["2_2"]["field2"] == "test4" + + def test__map_of_maps_of_structs_with_proto_and_enum(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + + _type = PBType( + { + "map_type": { + "key_type": int64_type(), + "value_type": { + "map_type": { + "key_type": {"string_type": {}}, + "value_type": { + "struct_type": { + "fields": [ + { + "field_name": "int_field", + "type_": int64_type(), + }, + { + "field_name": "singer", + "type_": proto_type(), + }, + { + "field_name": "genre", + "type_": enum_type(), + }, + ] + } + }, + } 
+ }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (int, map) tuples + { + "array_value": { + "values": [ # (int, map) tuples + {"int_value": 1}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuples + {"string_value": "1_1"}, + { + "array_value": { + "values": [ + { + "int_value": 12 + }, + { + "bytes_value": singer1.SerializeToString() + }, + { + "int_value": 0 + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuples + {"string_value": "1_2"}, + { + "array_value": { + "values": [ + { + "int_value": 34 + }, + { + "bytes_value": singer2.SerializeToString() + }, + { + "int_value": 1 + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (int, map) tuples + {"int_value": 2}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuples + {"string_value": "2_1"}, + { + "array_value": { + "values": [ + { + "int_value": 56 + }, + { + "bytes_value": singer1.SerializeToString() + }, + { + "int_value": 2 + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuples + {"string_value": "2_2"}, + { + "array_value": { + "values": [ + { + "int_value": 78 + }, + { + "bytes_value": singer2.SerializeToString() + }, + { + "int_value": 3 + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Map + assert type(metadata_type.value_type.key_type) is SqlType.String + assert type(metadata_type.value_type.value_type) is SqlType.Struct + assert type(metadata_type.value_type.value_type["int_field"]) is SqlType.Int64 + assert type(metadata_type.value_type.value_type["singer"]) is SqlType.Proto + assert type(metadata_type.value_type.value_type["genre"]) is SqlType.Enum + + # without proto definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + + assert result[1]["1_1"]["int_field"] == 12 + assert result[1]["1_1"]["singer"] == singer1.SerializeToString() + assert result[1]["1_1"]["genre"] == 0 + + assert result[1]["1_2"]["int_field"] == 34 + assert result[1]["1_2"]["singer"] == singer2.SerializeToString() + assert result[1]["1_2"]["genre"] == 1 + + assert result[2]["2_1"]["int_field"] == 56 + assert result[2]["2_1"]["singer"] == singer1.SerializeToString() + assert result[2]["2_1"]["genre"] == 2 + + assert result[2]["2_2"]["int_field"] == 78 + assert result[2]["2_2"]["singer"] == singer2.SerializeToString() + assert result[2]["2_2"]["genre"] == 3 + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value.value.singer": singer_pb2.Singer(), + "map_field.value.value.genre": singer_pb2.Genre, + }, + ) + + assert result[1]["1_1"]["int_field"] == 12 + assert result[1]["1_1"]["singer"] == singer1 + assert result[1]["1_1"]["genre"] == "POP" + + assert result[1]["1_2"]["int_field"] == 34 + assert result[1]["1_2"]["singer"] == singer2 + assert result[1]["1_2"]["genre"] == "JAZZ" + + assert result[2]["2_1"]["int_field"] == 56 + assert result[2]["2_1"]["singer"] == singer1 + assert result[2]["2_1"]["genre"] == "FOLK" + + assert result[2]["2_2"]["int_field"] == 78 + assert result[2]["2_2"]["singer"] == singer2 + assert 
result[2]["2_2"]["genre"] == "ROCK" + + def test__map_of_lists_of_structs(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_BYTES, + "value_type": { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "timestamp", + "type_": TYPE_TIMESTAMP, + }, + { + "field_name": "value", + "type_": TYPE_BYTES, + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (byte, list) tuples + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key1"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 1111111111 + } + }, + { + "bytes_value": b"key1-value1" + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 2222222222 + } + }, + { + "bytes_value": b"key1-value2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key2"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 3333333333 + } + }, + { + "bytes_value": b"key2-value1" + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 4444444444 + } + }, + { + "bytes_value": b"key2-value2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Bytes + assert type(metadata_type.value_type) is SqlType.Array + assert type(metadata_type.value_type.element_type) is SqlType.Struct + assert ( + type(metadata_type.value_type.element_type["timestamp"]) + is SqlType.Timestamp + ) + assert type(metadata_type.value_type.element_type["value"]) is SqlType.Bytes + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + + timestamp1 = DatetimeWithNanoseconds( + 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc + ) + timestamp2 = DatetimeWithNanoseconds( + 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc + ) + timestamp3 = DatetimeWithNanoseconds( + 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc + ) + timestamp4 = DatetimeWithNanoseconds( + 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc + ) + + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == b"key1-value1" + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == b"key1-value2" + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == b"key2-value1" + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == b"key2-value2" + + def test__map_of_lists_of_structs_with_protos(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + singer3 = singer_pb2.Singer(name="Jay") + singer4 = singer_pb2.Singer(name="Eric") + + _type = PBType( + { + "map_type": { + "key_type": TYPE_BYTES, + "value_type": { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "timestamp", + "type_": TYPE_TIMESTAMP, + }, + { + "field_name": "value", + "type_": proto_type(), + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (byte, list) tuples + { + 
"array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key1"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 1111111111 + } + }, + { + "bytes_value": singer1.SerializeToString() + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 2222222222 + } + }, + { + "bytes_value": singer2.SerializeToString() + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key2"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 3333333333 + } + }, + { + "bytes_value": singer3.SerializeToString() + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 4444444444 + } + }, + { + "bytes_value": singer4.SerializeToString() + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Bytes + assert type(metadata_type.value_type) is SqlType.Array + assert type(metadata_type.value_type.element_type) is SqlType.Struct + assert ( + type(metadata_type.value_type.element_type["timestamp"]) + is SqlType.Timestamp + ) + assert type(metadata_type.value_type.element_type["value"]) is SqlType.Proto + + timestamp1 = DatetimeWithNanoseconds( + 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc + ) + timestamp2 = DatetimeWithNanoseconds( + 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc + ) + timestamp3 = DatetimeWithNanoseconds( + 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc + ) + timestamp4 = DatetimeWithNanoseconds( + 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc + ) + + # without proto definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == singer1.SerializeToString() + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == singer2.SerializeToString() + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == singer3.SerializeToString() + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == singer4.SerializeToString() + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value.value": singer_pb2.Singer(), + }, + ) + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == singer1 + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == singer2 + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == singer3 + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == singer4 + + def test__map_of_lists_of_structs_with_enums(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_BYTES, + "value_type": { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "timestamp", + "type_": TYPE_TIMESTAMP, + }, + { + "field_name": "value", + "type_": enum_type(), + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + 
"array_value": { + "values": [ # list of (byte, list) tuples + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key1"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 1111111111 + } + }, + {"int_value": 0}, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 2222222222 + } + }, + {"int_value": 1}, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key2"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 3333333333 + } + }, + {"int_value": 2}, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 4444444444 + } + }, + {"int_value": 3}, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Bytes + assert type(metadata_type.value_type) is SqlType.Array + assert type(metadata_type.value_type.element_type) is SqlType.Struct + assert ( + type(metadata_type.value_type.element_type["timestamp"]) + is SqlType.Timestamp + ) + assert type(metadata_type.value_type.element_type["value"]) is SqlType.Enum + + timestamp1 = DatetimeWithNanoseconds( + 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc + ) + timestamp2 = DatetimeWithNanoseconds( + 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc + ) + timestamp3 = DatetimeWithNanoseconds( + 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc + ) + timestamp4 = DatetimeWithNanoseconds( + 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc + ) + + # without enum definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == 0 + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == 1 + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == 2 + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == 3 + + # with enum definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value.value": singer_pb2.Genre, + }, + ) + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == "POP" + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == "JAZZ" + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == "FOLK" + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == "ROCK" + + def test__invalid_type_throws_exception(self): + _type = PBType({"string_type": {}}) + value = PBValue({"int_value": 1}) + metadata_type = _pb_type_to_metadata_type(_type) + + with pytest.raises( + ValueError, + match="string_value field for String type not found in a Value.", + ): + _parse_pb_value_to_python_value(value._pb, metadata_type, "string_field") diff --git a/tests/unit/data/execute_query/test_query_result_row_reader.py b/tests/unit/data/execute_query/test_query_result_row_reader.py new file mode 100644 index 000000000..ab98b54bd --- /dev/null +++ 
b/tests/unit/data/execute_query/test_query_result_row_reader.py @@ -0,0 +1,303 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from unittest import mock +from google.cloud.bigtable_v2.types.data import Value as PBValue +from google.cloud.bigtable.data.execute_query._reader import _QueryResultRowReader + +from google.cloud.bigtable.data.execute_query.metadata import ( + Metadata, + SqlType, + _pb_metadata_to_metadata_types, +) + +import google.cloud.bigtable.data.execute_query._reader +from tests.unit.data.execute_query.sql_helpers import ( + chunked_responses, + column, + int64_type, + int_val, + metadata, + proto_rows_bytes, + str_val, + bytes_val, +) +from samples.testdata import singer_pb2 + + +class TestQueryResultRowReader: + def test__single_values_received(self): + metadata = Metadata([("test1", SqlType.Int64()), ("test2", SqlType.Int64())]) + values = [ + proto_rows_bytes(int_val(1), int_val(2)), + proto_rows_bytes(int_val(3), int_val(4)), + ] + + reader = _QueryResultRowReader() + + result = reader.consume(values[0:1], metadata) + assert len(result) == 1 + assert len(result[0]) == 2 + result = reader.consume(values[1:], metadata) + assert len(result) == 1 + assert len(result[0]) == 2 + + def test__multiple_rows_received(self): + values = [ + proto_rows_bytes(int_val(1), int_val(2), int_val(3), int_val(4)), + proto_rows_bytes(int_val(5), int_val(6)), + proto_rows_bytes(int_val(7), int_val(8)), + ] + + metadata = Metadata([("test1", SqlType.Int64()), ("test2", SqlType.Int64())]) + reader = _QueryResultRowReader() + + result = reader.consume(values[0:1], metadata) + assert len(result) == 2 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 1 + assert result[0][1] == result[0]["test2"] == 2 + + assert len(result[1]) == 2 + assert result[1][0] == result[1]["test1"] == 3 + assert result[1][1] == result[1]["test2"] == 4 + + result = reader.consume(values[1:2], metadata) + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 5 + assert result[0][1] == result[0]["test2"] == 6 + + result = reader.consume(values[2:], metadata) + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 7 + assert result[0][1] == result[0]["test2"] == 8 + + def test__received_values_are_passed_to_parser_in_batches(self): + metadata = Metadata([("test1", SqlType.Int64()), ("test2", SqlType.Int64())]) + + # TODO move to a SqlType test + assert SqlType.Struct([("a", SqlType.Int64())]) == SqlType.Struct( + [("a", SqlType.Int64())] + ) + assert SqlType.Struct([("a", SqlType.String())]) != SqlType.Struct( + [("a", SqlType.Int64())] + ) + assert SqlType.Struct([("a", SqlType.Int64())]) != SqlType.Struct( + [("b", SqlType.Int64())] + ) + + assert SqlType.Array(SqlType.Int64()) == SqlType.Array(SqlType.Int64()) + assert SqlType.Array(SqlType.Int64()) != SqlType.Array(SqlType.String()) + + assert SqlType.Map(SqlType.Int64(), SqlType.String()) == SqlType.Map( + 
SqlType.Int64(), SqlType.String() + ) + assert SqlType.Map(SqlType.Int64(), SqlType.String()) != SqlType.Map( + SqlType.String(), SqlType.String() + ) + + reader = _QueryResultRowReader() + with mock.patch.object( + google.cloud.bigtable.data.execute_query._reader, + "_parse_pb_value_to_python_value", + ) as parse_mock: + reader.consume([proto_rows_bytes(int_val(1), int_val(2))], metadata) + parse_mock.assert_has_calls( + [ + mock.call(PBValue(int_val(1)), SqlType.Int64(), "test1", None), + mock.call(PBValue(int_val(2)), SqlType.Int64(), "test2", None), + ] + ) + + def test__parser_errors_are_forwarded(self): + metadata = Metadata([("test1", SqlType.Int64())]) + + values = [str_val("test")] + + reader = _QueryResultRowReader() + with mock.patch.object( + google.cloud.bigtable.data.execute_query._reader, + "_parse_pb_value_to_python_value", + side_effect=ValueError("test"), + ) as parse_mock: + with pytest.raises(ValueError, match="test"): + reader.consume([proto_rows_bytes(values[0])], metadata) + + parse_mock.assert_has_calls( + [ + mock.call(PBValue(values[0]), SqlType.Int64(), "test1", None), + ] + ) + + def test__multiple_proto_rows_received_with_one_resume_token(self): + from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor + + def pass_values_to_byte_cursor(byte_cursor, iterable): + for value in iterable: + result = byte_cursor.consume(value) + if result is not None: + yield result + + stream = [ + *chunked_responses( + 4, int_val(1), int_val(2), int_val(3), int_val(4), token=b"token1" + ), + *chunked_responses(1, int_val(5), int_val(6), token=b"token2"), + ] + + byte_cursor = _ByteCursor() + reader = _QueryResultRowReader() + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + md = _pb_metadata_to_metadata_types( + metadata(column("test1", int64_type()), column("test2", int64_type())) + ) + + returned_values = [] + + def intercept_return_values(func): + nonlocal intercept_return_values + + def wrapped(*args, **kwargs): + value = func(*args, **kwargs) + returned_values.append(value) + return value + + return wrapped + + with mock.patch.object( + reader, + "_parse_proto_rows", + wraps=intercept_return_values(reader._parse_proto_rows), + ): + result = reader.consume(next(byte_cursor_iter), md) + + # Despite the fact that two ProtoRows were received, a single resume_token after the second ProtoRows object forces us to parse them together. + # We will interpret them as one larger ProtoRows object. 
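+ # Note (added): a resume_token marks the last point that can safely be replayed on + # retry, so all bytes received since the previous token must be buffered and decoded + # together by a single _parse_proto_rows call.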
+ assert len(returned_values) == 1 + assert len(returned_values[0]) == 4 + assert returned_values[0][0].int_value == 1 + assert returned_values[0][1].int_value == 2 + assert returned_values[0][2].int_value == 3 + assert returned_values[0][3].int_value == 4 + + assert len(result) == 2 + assert len(result[0]) == 2 + assert result[0][0] == 1 + assert result[0]["test1"] == 1 + assert result[0][1] == 2 + assert result[0]["test2"] == 2 + assert len(result[1]) == 2 + assert result[1][0] == 3 + assert result[1]["test1"] == 3 + assert result[1][1] == 4 + assert result[1]["test2"] == 4 + assert byte_cursor._resume_token == b"token1" + + returned_values = [] + with mock.patch.object( + reader, + "_parse_proto_rows", + wraps=intercept_return_values(reader._parse_proto_rows), + ): + result = reader.consume(next(byte_cursor_iter), md) + + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == 5 + assert result[0]["test1"] == 5 + assert result[0][1] == 6 + assert result[0]["test2"] == 6 + assert byte_cursor._resume_token == b"token2" + + def test_multiple_batches(self): + reader = _QueryResultRowReader() + batches = [ + proto_rows_bytes(int_val(1), int_val(2), int_val(3), int_val(4)), + proto_rows_bytes(int_val(5), int_val(6)), + proto_rows_bytes(int_val(7), int_val(8)), + ] + results = reader.consume( + batches, + Metadata([("test1", SqlType.Int64()), ("test2", SqlType.Int64())]), + ) + assert len(results) == 4 + [row1, row2, row3, row4] = results + assert row1["test1"] == 1 + assert row1["test2"] == 2 + assert row2["test1"] == 3 + assert row2["test2"] == 4 + assert row3["test1"] == 5 + assert row3["test2"] == 6 + assert row4["test1"] == 7 + assert row4["test2"] == 8 + + def test_multiple_batches_with_proto_and_enum_types(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + singer3 = singer_pb2.Singer(name="Jay") + singer4 = singer_pb2.Singer(name="Eric") + + reader = _QueryResultRowReader() + batches = [ + proto_rows_bytes( + bytes_val(singer1.SerializeToString()), + int_val(0), + bytes_val(singer2.SerializeToString()), + int_val(1), + ), + proto_rows_bytes(bytes_val(singer3.SerializeToString()), int_val(2)), + proto_rows_bytes(bytes_val(singer4.SerializeToString()), int_val(3)), + ] + + results = reader.consume( + batches, + Metadata([("singer", SqlType.Proto()), ("genre", SqlType.Enum())]), + {"singer": singer_pb2.Singer(), "genre": singer_pb2.Genre}, + ) + assert len(results) == 4 + [row1, row2, row3, row4] = results + assert row1["singer"] == singer1 + assert row1["genre"] == "POP" + assert row2["singer"] == singer2 + assert row2["genre"] == "JAZZ" + assert row3["singer"] == singer3 + assert row3["genre"] == "FOLK" + assert row4["singer"] == singer4 + assert row4["genre"] == "ROCK" + + +class TestMetadata: + def test__duplicate_column_names(self): + metadata = Metadata( + [ + ("test1", SqlType.Int64()), + ("test2", SqlType.Bytes()), + ("test2", SqlType.String()), + ] + ) + assert metadata[0].column_name == "test1" + assert metadata["test1"].column_type == SqlType.Int64() + + # duplicate columns not accessible by name + with pytest.raises(KeyError, match="Ambigious column name"): + metadata["test2"] + + # duplicate columns accessible by index + assert metadata[1].column_type == SqlType.Bytes() + assert metadata[1].column_name == "test2" + assert metadata[2].column_type == SqlType.String() + assert metadata[2].column_name == "test2" diff --git a/tests/unit/read-rows-acceptance-test.json b/tests/unit/data/read-rows-acceptance-test.json
similarity index 100% rename from tests/unit/read-rows-acceptance-test.json rename to tests/unit/data/read-rows-acceptance-test.json diff --git a/tests/unit/data/test__helpers.py b/tests/unit/data/test__helpers.py new file mode 100644 index 000000000..c8540024d --- /dev/null +++ b/tests/unit/data/test__helpers.py @@ -0,0 +1,363 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import pytest +import grpc +from google.api_core import exceptions as core_exceptions +import google.cloud.bigtable.data._helpers as _helpers +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT + +import mock + + +class TestAttemptTimeoutGenerator: + @pytest.mark.parametrize( + "request_t,operation_t,expected_list", + [ + (1, 3.5, [1, 1, 1, 0.5, 0, 0]), + (None, 3.5, [3.5, 2.5, 1.5, 0.5, 0, 0]), + (10, 5, [5, 4, 3, 2, 1, 0, 0]), + (3, 3, [3, 2, 1, 0, 0, 0, 0]), + (0, 3, [0, 0, 0]), + (3, 0, [0, 0, 0]), + (-1, 3, [0, 0, 0]), + (3, -1, [0, 0, 0]), + ], + ) + def test_attempt_timeout_generator(self, request_t, operation_t, expected_list): + """ + test different values for timeouts. Clock is incremented by 1 second for each item in expected_list + """ + timestamp_start = 123 + with mock.patch("time.monotonic") as mock_monotonic: + mock_monotonic.return_value = timestamp_start + generator = _helpers._attempt_timeout_generator(request_t, operation_t) + for val in expected_list: + mock_monotonic.return_value += 1 + assert next(generator) == val + + @pytest.mark.parametrize( + "request_t,operation_t,expected", + [ + (1, 3.5, 1), + (None, 3.5, 3.5), + (10, 5, 5), + (5, 10, 5), + (3, 3, 3), + (0, 3, 0), + (3, 0, 0), + (-1, 3, 0), + (3, -1, 0), + ], + ) + def test_attempt_timeout_frozen_time(self, request_t, operation_t, expected): + """test with time.monotonic frozen""" + timestamp_start = 123 + with mock.patch("time.monotonic") as mock_monotonic: + mock_monotonic.return_value = timestamp_start + generator = _helpers._attempt_timeout_generator(request_t, operation_t) + assert next(generator) == expected + # value should not change without time.monotonic changing + assert next(generator) == expected + + def test_attempt_timeout_w_sleeps(self): + """use real sleep values to make sure it matches expectations""" + from time import sleep + + operation_timeout = 1 + generator = _helpers._attempt_timeout_generator(None, operation_timeout) + expected_value = operation_timeout + sleep_time = 0.1 + for i in range(3): + found_value = next(generator) + assert abs(found_value - expected_value) < 0.1 + sleep(sleep_time) + expected_value -= sleep_time + + +class TestValidateTimeouts: + def test_validate_timeouts_error_messages(self): + with pytest.raises(ValueError) as e: + _helpers._validate_timeouts(operation_timeout=1, attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + _helpers._validate_timeouts(operation_timeout=-1, attempt_timeout=1) + assert "operation_timeout must be greater than 0" in str(e.value) + + @pytest.mark.parametrize( + 
"args,expected", + [ + ([1, None, False], False), + ([1, None, True], True), + ([1, 1, False], True), + ([1, 1, True], True), + ([1, 1], True), + ([1, None], False), + ([2, 1], True), + ([0, 1], False), + ([1, 0], False), + ([60, None], False), + ([600, None], False), + ([600, 600], True), + ], + ) + def test_validate_with_inputs(self, args, expected): + """ + test whether an exception is thrown with different inputs + """ + success = False + try: + _helpers._validate_timeouts(*args) + success = True + except ValueError: + pass + assert success == expected + + +class TestGetTimeouts: + @pytest.mark.parametrize( + "input_times,input_table,expected", + [ + ((2, 1), {}, (2, 1)), + ((2, 4), {}, (2, 2)), + ((2, None), {}, (2, 2)), + ( + (TABLE_DEFAULT.DEFAULT, TABLE_DEFAULT.DEFAULT), + {"operation": 3, "attempt": 2}, + (3, 2), + ), + ( + (TABLE_DEFAULT.READ_ROWS, TABLE_DEFAULT.READ_ROWS), + {"read_rows_operation": 3, "read_rows_attempt": 2}, + (3, 2), + ), + ( + (TABLE_DEFAULT.MUTATE_ROWS, TABLE_DEFAULT.MUTATE_ROWS), + {"mutate_rows_operation": 3, "mutate_rows_attempt": 2}, + (3, 2), + ), + ((10, TABLE_DEFAULT.DEFAULT), {"attempt": None}, (10, 10)), + ((10, TABLE_DEFAULT.DEFAULT), {"attempt": 5}, (10, 5)), + ((10, TABLE_DEFAULT.DEFAULT), {"attempt": 100}, (10, 10)), + ((TABLE_DEFAULT.DEFAULT, 10), {"operation": 12}, (12, 10)), + ((TABLE_DEFAULT.DEFAULT, 10), {"operation": 3}, (3, 3)), + ], + ) + def test_get_timeouts(self, input_times, input_table, expected): + """ + test input/output mappings for a variety of valid inputs + """ + fake_table = mock.Mock() + for key in input_table.keys(): + # set the default fields in our fake table mock + setattr(fake_table, f"default_{key}_timeout", input_table[key]) + t1, t2 = _helpers._get_timeouts(input_times[0], input_times[1], fake_table) + assert t1 == expected[0] + assert t2 == expected[1] + + @pytest.mark.parametrize( + "input_times,input_table", + [ + ([0, 1], {}), + ([1, 0], {}), + ([None, 1], {}), + ([TABLE_DEFAULT.DEFAULT, 1], {"operation": None}), + ([TABLE_DEFAULT.DEFAULT, 1], {"operation": 0}), + ([1, TABLE_DEFAULT.DEFAULT], {"attempt": 0}), + ], + ) + def test_get_timeouts_invalid(self, input_times, input_table): + """ + test with inputs that should raise error during validation step + """ + fake_table = mock.Mock() + for key in input_table.keys(): + # set the default fields in our fake table mock + setattr(fake_table, f"default_{key}_timeout", input_table[key]) + with pytest.raises(ValueError): + _helpers._get_timeouts(input_times[0], input_times[1], fake_table) + + +class TestAlignTimeouts: + @pytest.mark.parametrize( + "input_times,expected", + [ + ((2, 1), (2, 1)), + ((2, 4), (2, 2)), + ((2, None), (2, 2)), + ], + ) + def test_get_timeouts(self, input_times, expected): + """ + test input/output mappings for a variety of valid inputs + """ + t1, t2 = _helpers._align_timeouts(input_times[0], input_times[1]) + assert t1 == expected[0] + assert t2 == expected[1] + + @pytest.mark.parametrize( + "input_times", + [ + ([0, 1]), + ([1, 0]), + ([None, 1]), + ], + ) + def test_get_timeouts_invalid(self, input_times): + """ + test with inputs that should raise error during validation step + """ + with pytest.raises(ValueError): + _helpers._align_timeouts(input_times[0], input_times[1]) + + +class TestGetRetryableErrors: + @pytest.mark.parametrize( + "input_codes,input_table,expected", + [ + ((), {}, []), + ((Exception,), {}, [Exception]), + (TABLE_DEFAULT.DEFAULT, {"default": [Exception]}, [Exception]), + ( + TABLE_DEFAULT.READ_ROWS, + 
{"default_read_rows": (RuntimeError, ValueError)}, + [RuntimeError, ValueError], + ), + ( + TABLE_DEFAULT.MUTATE_ROWS, + {"default_mutate_rows": (ValueError,)}, + [ValueError], + ), + ((4,), {}, [core_exceptions.DeadlineExceeded]), + ( + [grpc.StatusCode.DEADLINE_EXCEEDED], + {}, + [core_exceptions.DeadlineExceeded], + ), + ( + (14, grpc.StatusCode.ABORTED, RuntimeError), + {}, + [ + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + RuntimeError, + ], + ), + ], + ) + def test_get_retryable_errors(self, input_codes, input_table, expected): + """ + test input/output mappings for a variety of valid inputs + """ + fake_table = mock.Mock() + for key in input_table.keys(): + # set the default fields in our fake table mock + setattr(fake_table, f"{key}_retryable_errors", input_table[key]) + result = _helpers._get_retryable_errors(input_codes, fake_table) + assert result == expected + + +class TestTrackedBackoffGenerator: + def test_tracked_backoff_generator_history(self): + """ + Should be able to retrieve historical results from backoff generator + """ + generator = _helpers.TrackedBackoffGenerator( + initial=0, multiplier=2, maximum=10 + ) + got_list = [next(generator) for _ in range(20)] + + # check all values are correct + for i in range(19, 0, -1): + assert generator.get_attempt_backoff(i) == got_list[i] + # check a random value out of order + assert generator.get_attempt_backoff(5) == got_list[5] + + @mock.patch("random.uniform", side_effect=lambda a, b: b) + def test_tracked_backoff_generator_defaults(self, mock_uniform): + """ + Should generate values with default parameters + + initial=0.01, multiplier=2, maximum=60 + """ + generator = _helpers.TrackedBackoffGenerator() + expected_values = [0.01, 0.02, 0.04, 0.08, 0.16] + for expected in expected_values: + assert next(generator) == pytest.approx(expected) + + @mock.patch("random.uniform", side_effect=lambda a, b: b) + def test_tracked_backoff_generator_with_maximum(self, mock_uniform): + """ + Should cap the backoff at the maximum value + """ + generator = _helpers.TrackedBackoffGenerator(initial=1, multiplier=2, maximum=5) + expected_values = [1, 2, 4, 5, 5, 5] + for expected in expected_values: + assert next(generator) == expected + + def test_get_attempt_backoff_out_of_bounds(self): + """ + get_attempt_backoff should raise IndexError for out of bounds index + """ + generator = _helpers.TrackedBackoffGenerator() + next(generator) + next(generator) + with pytest.raises(IndexError): + generator.get_attempt_backoff(2) + with pytest.raises(IndexError): + generator.get_attempt_backoff(-3) + + def test_set_next_full_set(self): + """ + try always using set_next to populate generator + """ + generator = _helpers.TrackedBackoffGenerator() + for idx, val in enumerate(range(100, 0, -1)): + generator.set_next(val) + got = next(generator) + assert got == val + assert generator.get_attempt_backoff(idx) == val + + def test_set_next_negative_value(self): + generator = _helpers.TrackedBackoffGenerator() + with pytest.raises(ValueError): + generator.set_next(-1) + + @mock.patch("random.uniform", side_effect=lambda a, b: b) + def test_interleaved_set_next(self, mock_uniform): + import itertools + + generator = _helpers.TrackedBackoffGenerator( + initial=1, multiplier=2, maximum=128 + ) + # values we expect generator to create + expected_values = [2**i for i in range(8)] + # values we will insert + inserted_values = [9, 61, 0, 4, 33, 12, 18, 2] + for idx in range(8): + assert next(generator) == expected_values[idx] + 
generator.set_next(inserted_values[idx]) + assert next(generator) == inserted_values[idx] + # check to make sure history is as we expect + assert list(generator.history) == list( + itertools.chain.from_iterable(zip(expected_values, inserted_values)) + ) + + @mock.patch("random.uniform", side_effect=lambda a, b: b) + def test_set_next_replacement(self, mock_uniform): + generator = _helpers.TrackedBackoffGenerator(initial=1) + generator.set_next(99) + generator.set_next(88) + assert next(generator) == 88 + assert next(generator) == 1 diff --git a/tests/unit/data/test_exceptions.py b/tests/unit/data/test_exceptions.py new file mode 100644 index 000000000..bc921717e --- /dev/null +++ b/tests/unit/data/test_exceptions.py @@ -0,0 +1,533 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import pytest +import sys + +import google.cloud.bigtable.data.exceptions as bigtable_exceptions + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + + +class TracebackTests311: + """ + Provides a set of tests that should be run on python 3.11 and above, + to verify that the exception traceback looks as expected + """ + + @pytest.mark.skipif( + sys.version_info < (3, 11), reason="requires python3.11 or higher" + ) + def test_311_traceback(self): + """ + Exception customizations should not break rich exception group traceback in python 3.11 + """ + import traceback + + sub_exc1 = RuntimeError("first sub exception") + sub_exc2 = ZeroDivisionError("second sub exception") + sub_group = self._make_one(excs=[sub_exc2]) + exc_group = self._make_one(excs=[sub_exc1, sub_group]) + + expected_traceback = ( + f" | google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}: {str(exc_group)}", + " +-+---------------- 1 ----------------", + " | RuntimeError: first sub exception", + " +---------------- 2 ----------------", + f" | google.cloud.bigtable.data.exceptions.{type(sub_group).__name__}: {str(sub_group)}", + " +-+---------------- 1 ----------------", + " | ZeroDivisionError: second sub exception", + " +------------------------------------", + ) + exception_caught = False + try: + raise exc_group + except self._get_class(): + exception_caught = True + tb = traceback.format_exc() + tb_relevant_lines = tuple(tb.splitlines()[3:]) + assert expected_traceback == tb_relevant_lines + assert exception_caught + + @pytest.mark.skipif( + sys.version_info < (3, 11), reason="requires python3.11 or higher" + ) + def test_311_traceback_with_cause(self): + """ + traceback should display nicely with sub-exceptions with __cause__ set + """ + import traceback + + sub_exc1 = RuntimeError("first sub exception") + cause_exc = ImportError("cause exception") + sub_exc1.__cause__ = cause_exc + sub_exc2 = ZeroDivisionError("second sub exception") + exc_group = self._make_one(excs=[sub_exc1, sub_exc2]) + + expected_traceback = ( + f" | google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}:
{str(exc_group)}", + " +-+---------------- 1 ----------------", + " | ImportError: cause exception", + " | ", + " | The above exception was the direct cause of the following exception:", + " | ", + " | RuntimeError: first sub exception", + " +---------------- 2 ----------------", + " | ZeroDivisionError: second sub exception", + " +------------------------------------", + ) + exception_caught = False + try: + raise exc_group + except self._get_class(): + exception_caught = True + tb = traceback.format_exc() + tb_relevant_lines = tuple(tb.splitlines()[3:]) + assert expected_traceback == tb_relevant_lines + assert exception_caught + + @pytest.mark.skipif( + sys.version_info < (3, 11), reason="requires python3.11 or higher" + ) + def test_311_exception_group(self): + """ + Python 3.11+ should handle exepctions as native exception groups + """ + exceptions = [RuntimeError("mock"), ValueError("mock")] + instance = self._make_one(excs=exceptions) + # ensure split works as expected + runtime_error, others = instance.split(lambda e: isinstance(e, RuntimeError)) + assert runtime_error.exceptions[0] == exceptions[0] + assert others.exceptions[0] == exceptions[1] + + +class TracebackTests310: + """ + Provides a set of tests that should be run on python 3.10 and under, + to verify that the exception traceback looks as expected + """ + + @pytest.mark.skipif( + sys.version_info >= (3, 11), reason="requires python3.10 or lower" + ) + def test_310_traceback(self): + """ + Exception customizations should not break rich exception group traceback in python 3.10 + """ + import traceback + + sub_exc1 = RuntimeError("first sub exception") + sub_exc2 = ZeroDivisionError("second sub exception") + sub_group = self._make_one(excs=[sub_exc2]) + exc_group = self._make_one(excs=[sub_exc1, sub_group]) + found_message = str(exc_group).splitlines()[0] + found_sub_message = str(sub_group).splitlines()[0] + + expected_traceback = ( + f"google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}: {found_message}", + "--+---------------- 1 ----------------", + " | RuntimeError: first sub exception", + " +---------------- 2 ----------------", + f" | {type(sub_group).__name__}: {found_sub_message}", + " --+---------------- 1 ----------------", + " | ZeroDivisionError: second sub exception", + " +------------------------------------", + ) + exception_caught = False + try: + raise exc_group + except self._get_class(): + exception_caught = True + tb = traceback.format_exc() + tb_relevant_lines = tuple(tb.splitlines()[3:]) + assert expected_traceback == tb_relevant_lines + assert exception_caught + + @pytest.mark.skipif( + sys.version_info >= (3, 11), reason="requires python3.10 or lower" + ) + def test_310_traceback_with_cause(self): + """ + traceback should display nicely with sub-exceptions with __cause__ set + """ + import traceback + + sub_exc1 = RuntimeError("first sub exception") + cause_exc = ImportError("cause exception") + sub_exc1.__cause__ = cause_exc + sub_exc2 = ZeroDivisionError("second sub exception") + exc_group = self._make_one(excs=[sub_exc1, sub_exc2]) + found_message = str(exc_group).splitlines()[0] + + expected_traceback = ( + f"google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}: {found_message}", + "--+---------------- 1 ----------------", + " | ImportError: cause exception", + " | ", + " | The above exception was the direct cause of the following exception:", + " | ", + " | RuntimeError: first sub exception", + " +---------------- 2 ----------------", + " | ZeroDivisionError: second sub 
exception", + " +------------------------------------", + ) + exception_caught = False + try: + raise exc_group + except self._get_class(): + exception_caught = True + tb = traceback.format_exc() + tb_relevant_lines = tuple(tb.splitlines()[3:]) + assert expected_traceback == tb_relevant_lines + assert exception_caught + + +class TestBigtableExceptionGroup(TracebackTests311, TracebackTests310): + """ + Subclass for MutationsExceptionGroup, RetryExceptionGroup, and ShardedReadRowsExceptionGroup + """ + + def _get_class(self): + from google.cloud.bigtable.data.exceptions import _BigtableExceptionGroup + + return _BigtableExceptionGroup + + def _make_one(self, message="test_message", excs=None): + if excs is None: + excs = [RuntimeError("mock")] + + return self._get_class()(message, excs=excs) + + def test_raise(self): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + test_msg = "test message" + test_excs = [Exception(test_msg)] + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_msg, test_excs) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == test_msg + assert list(e.value.exceptions) == test_excs + + def test_raise_empty_list(self): + """ + Empty exception lists are not supported + """ + with pytest.raises(ValueError) as e: + raise self._make_one(excs=[]) + assert "non-empty sequence" in str(e.value) + + def test_exception_handling(self): + """ + All versions should inherit from exception + and support tranditional exception handling + """ + instance = self._make_one() + assert isinstance(instance, Exception) + try: + raise instance + except Exception as e: + assert isinstance(e, Exception) + assert e == instance + was_raised = True + assert was_raised + + +class TestMutationsExceptionGroup(TestBigtableExceptionGroup): + def _get_class(self): + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + + return MutationsExceptionGroup + + def _make_one(self, excs=None, num_entries=3): + if excs is None: + excs = [RuntimeError("mock")] + + return self._get_class()(excs, num_entries) + + @pytest.mark.parametrize( + "exception_list,total_entries,expected_message", + [ + ([Exception()], 1, "1 failed entry from 1 attempted."), + ([Exception()], 2, "1 failed entry from 2 attempted."), + ( + [Exception(), RuntimeError()], + 2, + "2 failed entries from 2 attempted.", + ), + ], + ) + def test_raise(self, exception_list, total_entries, expected_message): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + with pytest.raises(self._get_class()) as e: + raise self._get_class()(exception_list, total_entries) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == expected_message + assert list(e.value.exceptions) == exception_list + + def test_raise_custom_message(self): + """ + should be able to set a custom error message + """ + custom_message = "custom message" + exception_list = [Exception()] + with pytest.raises(self._get_class()) as e: + raise self._get_class()(exception_list, 5, message=custom_message) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == custom_message + assert list(e.value.exceptions) == exception_list + + @pytest.mark.parametrize( + "first_list_len,second_list_len,total_excs,entry_count,expected_message", + [ + (3, 0, 3, 4, "3 failed entries from 4 attempted."), 
+ (1, 0, 1, 2, "1 failed entry from 2 attempted."), + (0, 1, 1, 2, "1 failed entry from 2 attempted."), + (2, 2, 4, 4, "4 failed entries from 4 attempted."), + ( + 1, + 1, + 3, + 2, + "3 failed entries from 2 attempted. (first 1 and last 1 attached as sub-exceptions; 1 truncated)", + ), + ( + 1, + 2, + 100, + 2, + "100 failed entries from 2 attempted. (first 1 and last 2 attached as sub-exceptions; 97 truncated)", + ), + ( + 2, + 1, + 4, + 9, + "4 failed entries from 9 attempted. (first 2 and last 1 attached as sub-exceptions; 1 truncated)", + ), + ( + 3, + 0, + 10, + 10, + "10 failed entries from 10 attempted. (first 3 attached as sub-exceptions; 7 truncated)", + ), + ( + 0, + 3, + 10, + 10, + "10 failed entries from 10 attempted. (last 3 attached as sub-exceptions; 7 truncated)", + ), + ], + ) + def test_from_truncated_lists( + self, first_list_len, second_list_len, total_excs, entry_count, expected_message + ): + """ + Should be able to make MutationsExceptionGroup using a pair of + lists representing a larger truncated list of exceptions + """ + first_list = [Exception()] * first_list_len + second_list = [Exception()] * second_list_len + with pytest.raises(self._get_class()) as e: + raise self._get_class().from_truncated_lists( + first_list, second_list, total_excs, entry_count + ) + found_message = str(e.value).splitlines()[ + 0 + ] # added to parse out sub-exceptions in <3.11 + assert found_message == expected_message + assert list(e.value.exceptions) == first_list + second_list + + +class TestRetryExceptionGroup(TestBigtableExceptionGroup): + def _get_class(self): + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + return RetryExceptionGroup + + def _make_one(self, excs=None): + if excs is None: + excs = [RuntimeError("mock")] + + return self._get_class()(excs=excs) + + @pytest.mark.parametrize( + "exception_list,expected_message", + [ + ([Exception()], "1 failed attempt"), + ([Exception(), RuntimeError()], "2 failed attempts"), + ( + [Exception(), ValueError("test")], + "2 failed attempts", + ), + ( + [ + bigtable_exceptions.RetryExceptionGroup( + [Exception(), ValueError("test")] + ) + ], + "1 failed attempt", + ), + ], + ) + def test_raise(self, exception_list, expected_message): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + with pytest.raises(self._get_class()) as e: + raise self._get_class()(exception_list) + found_message = str(e.value).splitlines()[ + 0 + ] # added to parse out sub-exceptions in <3.11 + assert found_message == expected_message + assert list(e.value.exceptions) == exception_list + + +class TestShardedReadRowsExceptionGroup(TestBigtableExceptionGroup): + def _get_class(self): + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + + return ShardedReadRowsExceptionGroup + + def _make_one(self, excs=None, succeeded=None, num_entries=3): + if excs is None: + excs = [RuntimeError("mock")] + succeeded = succeeded or [] + + return self._get_class()(excs, succeeded, num_entries) + + @pytest.mark.parametrize( + "exception_list,succeeded,total_entries,expected_message", + [ + ([Exception()], [], 1, "1 sub-exception (from 1 query attempted)"), + ([Exception()], [1], 2, "1 sub-exception (from 2 queries attempted)"), + ( + [Exception(), RuntimeError()], + [0, 1], + 2, + "2 sub-exceptions (from 2 queries attempted)", + ), + ], + ) + def test_raise(self, exception_list, succeeded, total_entries, expected_message): + """ + Create exception in raise statement, which calls __new__ and
__init__ + """ + with pytest.raises(self._get_class()) as e: + raise self._get_class()(exception_list, succeeded, total_entries) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == expected_message + assert list(e.value.exceptions) == exception_list + assert e.value.successful_rows == succeeded + + +class TestFailedMutationEntryError: + def _get_class(self): + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + return FailedMutationEntryError + + def _make_one(self, idx=9, entry=mock.Mock(), cause=RuntimeError("mock")): + return self._get_class()(idx, entry, cause) + + def test_raise(self): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + test_idx = 2 + test_entry = mock.Mock() + test_exc = ValueError("test") + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_idx, test_entry, test_exc) + assert str(e.value) == "Failed idempotent mutation entry at index 2" + assert e.value.index == test_idx + assert e.value.entry == test_entry + assert e.value.__cause__ == test_exc + assert isinstance(e.value, Exception) + assert test_entry.is_idempotent.call_count == 1 + + def test_raise_idempotent(self): + """ + Test raise with non idempotent entry + """ + test_idx = 2 + test_entry = unittest.mock.Mock() + test_entry.is_idempotent.return_value = False + test_exc = ValueError("test") + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_idx, test_entry, test_exc) + assert str(e.value) == "Failed non-idempotent mutation entry at index 2" + assert e.value.index == test_idx + assert e.value.entry == test_entry + assert e.value.__cause__ == test_exc + assert test_entry.is_idempotent.call_count == 1 + + def test_no_index(self): + """ + Instances without an index should display different error string + """ + test_idx = None + test_entry = unittest.mock.Mock() + test_exc = ValueError("test") + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_idx, test_entry, test_exc) + assert str(e.value) == "Failed idempotent mutation entry" + assert e.value.index == test_idx + assert e.value.entry == test_entry + assert e.value.__cause__ == test_exc + assert isinstance(e.value, Exception) + assert test_entry.is_idempotent.call_count == 1 + + +class TestFailedQueryShardError: + def _get_class(self): + from google.cloud.bigtable.data.exceptions import FailedQueryShardError + + return FailedQueryShardError + + def _make_one(self, idx=9, query=mock.Mock(), cause=RuntimeError("mock")): + return self._get_class()(idx, query, cause) + + def test_raise(self): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + test_idx = 2 + test_query = mock.Mock() + test_exc = ValueError("test") + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_idx, test_query, test_exc) + assert str(e.value) == "Failed query at index 2" + assert e.value.index == test_idx + assert e.value.query == test_query + assert e.value.__cause__ == test_exc + assert isinstance(e.value, Exception) diff --git a/tests/unit/data/test_helpers.py b/tests/unit/data/test_helpers.py new file mode 100644 index 000000000..5d1ad70f8 --- /dev/null +++ b/tests/unit/data/test_helpers.py @@ -0,0 +1,45 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import pytest +from google.cloud.bigtable.helpers import batched + + +class TestBatched: + @pytest.mark.parametrize( + "input_list,batch_size,expected", + [ + ([1, 2, 3, 4, 5], 3, [[1, 2, 3], [4, 5]]), + ([1, 2, 3, 4, 5, 6], 3, [[1, 2, 3], [4, 5, 6]]), + ([1, 2, 3, 4, 5], 2, [[1, 2], [3, 4], [5]]), + ([1, 2, 3, 4, 5], 1, [[1], [2], [3], [4], [5]]), + ([1, 2, 3, 4, 5], 5, [[1, 2, 3, 4, 5]]), + ([], 1, []), + ], + ) + def test_batched(self, input_list, batch_size, expected): + result = list(batched(input_list, batch_size)) + assert list(map(list, result)) == expected + + @pytest.mark.parametrize( + "input_list,batch_size", + [ + ([1], 0), + ([1], -1), + ], + ) + def test_batched_errs(self, input_list, batch_size): + with pytest.raises(ValueError): + list(batched(input_list, batch_size)) diff --git a/tests/unit/data/test_mutations.py b/tests/unit/data/test_mutations.py new file mode 100644 index 000000000..17050162c --- /dev/null +++ b/tests/unit/data/test_mutations.py @@ -0,0 +1,822 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +import google.cloud.bigtable.data.mutations as mutations + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + + +class TestBaseMutation: + def _target_class(self): + from google.cloud.bigtable.data.mutations import Mutation + + return Mutation + + def test__to_dict(self): + """Should be unimplemented in the base class""" + with pytest.raises(NotImplementedError): + self._target_class()._to_dict(mock.Mock()) + + def test_is_idempotent(self): + """is_idempotent should assume True""" + assert self._target_class().is_idempotent(mock.Mock()) + + def test___str__(self): + """Str representation of mutations should be to_dict""" + self_mock = mock.Mock() + str_value = self._target_class().__str__(self_mock) + assert self_mock._to_dict.called + assert str_value == str(self_mock._to_dict.return_value) + + @pytest.mark.parametrize("test_dict", [{}, {"key": "value"}]) + def test_size(self, test_dict): + from sys import getsizeof + + """Size should return size of dict representation""" + self_mock = mock.Mock() + self_mock._to_dict.return_value = test_dict + size_value = self._target_class().size(self_mock) + assert size_value == getsizeof(test_dict) + + @pytest.mark.parametrize( + "expected_class,input_dict", + [ + ( + mutations.SetCell, + { + "set_cell": { + "family_name": "foo", + "column_qualifier": b"bar", + "value": b"test", + "timestamp_micros": 12345, + } + }, + ), + ( + mutations.DeleteRangeFromColumn, + { + "delete_from_column": { + "family_name": "foo", + "column_qualifier": b"bar", + "time_range": {}, + } + }, + ), + ( + mutations.DeleteRangeFromColumn, + { + "delete_from_column": { + "family_name": "foo", + "column_qualifier": b"bar", + "time_range": {"start_timestamp_micros": 123456789}, + } + }, + ), + ( + mutations.DeleteRangeFromColumn, + { + "delete_from_column": { + "family_name": "foo", + "column_qualifier": b"bar", + "time_range": {"end_timestamp_micros": 123456789}, + } + }, + ), + ( + mutations.DeleteRangeFromColumn, + { + "delete_from_column": { + "family_name": "foo", + "column_qualifier": b"bar", + "time_range": { + "start_timestamp_micros": 123, + "end_timestamp_micros": 123456789, + }, + } + }, + ), + ( + mutations.DeleteAllFromFamily, + {"delete_from_family": {"family_name": "foo"}}, + ), + (mutations.DeleteAllFromRow, {"delete_from_row": {}}), + ( + mutations.AddToCell, + { + "add_to_cell": { + "family_name": "foo", + "column_qualifier": {"raw_value": b"bar"}, + "timestamp": {"raw_timestamp_micros": 12345}, + "input": {"int_value": 123}, + } + }, + ), + ], + ) + def test__from_dict(self, expected_class, input_dict): + """Should be able to create instance from dict""" + instance = self._target_class()._from_dict(input_dict) + assert isinstance(instance, expected_class) + found_dict = instance._to_dict() + assert found_dict == input_dict + + @pytest.mark.parametrize( + "input_dict", + [ + {"set_cell": {}}, + { + "set_cell": { + "column_qualifier": b"bar", + "value": b"test", + "timestamp_micros": 12345, + } + }, + { + "set_cell": { + "family_name": "f", + "column_qualifier": b"bar", + "value": b"test", + } + }, + {"delete_from_family": {}}, + {"delete_from_column": {}}, + {"fake-type"}, + {}, + ], + ) + def test__from_dict_missing_fields(self, input_dict): + """If dict is malformed or fields are missing, should raise ValueError""" + with pytest.raises(ValueError): + self._target_class()._from_dict(input_dict) + + def 
test__from_dict_wrong_subclass(self): + """You shouldn't be able to instantiate one mutation type using the dict of another""" + subclasses = [ + mutations.SetCell("foo", b"bar", b"test"), + mutations.DeleteRangeFromColumn("foo", b"bar"), + mutations.DeleteAllFromFamily("foo"), + mutations.DeleteAllFromRow(), + mutations.AddToCell("foo", b"bar", 123, 456), + ] + for instance in subclasses: + others = [other for other in subclasses if other != instance] + for other in others: + with pytest.raises(ValueError) as e: + type(other)._from_dict(instance._to_dict()) + assert "Mutation type mismatch" in str(e.value) + + +class TestSetCell: + def _target_class(self): + from google.cloud.bigtable.data.mutations import SetCell + + return SetCell + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + @pytest.mark.parametrize("input_val", [2**64, -(2**64)]) + def test_ctor_large_int(self, input_val): + with pytest.raises(ValueError) as e: + self._make_one(family="f", qualifier=b"b", new_value=input_val) + assert "int values must be between" in str(e.value) + + @pytest.mark.parametrize("input_val", ["", "a", "abc", "hello world!"]) + def test_ctor_str_value(self, input_val): + found = self._make_one(family="f", qualifier=b"b", new_value=input_val) + assert found.new_value == input_val.encode("utf-8") + + def test_ctor(self): + """Ensure constructor sets expected values""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = 1234567890 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.new_value == expected_value + assert instance.timestamp_micros == expected_timestamp + + def test_ctor_str_inputs(self): + """Test with string qualifier and value""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + instance = self._make_one(expected_family, "test-qualifier", "test-value") + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.new_value == expected_value + + @pytest.mark.parametrize("input_val", [-20, -1, 0, 1, 100, int(2**60)]) + def test_ctor_int_value(self, input_val): + found = self._make_one(family="f", qualifier=b"b", new_value=input_val) + assert found.new_value == input_val.to_bytes(8, "big", signed=True) + + @pytest.mark.parametrize( + "int_value,expected_bytes", + [ + (-42, b"\xff\xff\xff\xff\xff\xff\xff\xd6"), + (-2, b"\xff\xff\xff\xff\xff\xff\xff\xfe"), + (-1, b"\xff\xff\xff\xff\xff\xff\xff\xff"), + (0, b"\x00\x00\x00\x00\x00\x00\x00\x00"), + (1, b"\x00\x00\x00\x00\x00\x00\x00\x01"), + (2, b"\x00\x00\x00\x00\x00\x00\x00\x02"), + (100, b"\x00\x00\x00\x00\x00\x00\x00d"), + ], + ) + def test_ctor_int_value_bytes(self, int_value, expected_bytes): + """Test with int value""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + instance = self._make_one(expected_family, expected_qualifier, int_value) + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.new_value == expected_bytes + + def test_ctor_negative_timestamp(self): + """Only positive or -1 timestamps are valid""" + with pytest.raises(ValueError) as e: + self._make_one("test-family", b"test-qualifier", b"test-value", -2) + assert ( + "timestamp_micros must be positive 
(or -1 for server-side timestamp)" + in str(e.value) + ) + + @pytest.mark.parametrize( + "timestamp_ns,expected_timestamp_micros", + [ + (0, 0), + (1, 0), + (123, 0), + (999, 0), + (999_999, 0), + (1_000_000, 1000), + (1_234_567, 1000), + (1_999_999, 1000), + (2_000_000, 2000), + (1_234_567_890_123, 1_234_567_000), + ], + ) + def test_ctor_no_timestamp(self, timestamp_ns, expected_timestamp_micros): + """If no timestamp is given, should use current time with millisecond precision""" + with mock.patch("time.time_ns", return_value=timestamp_ns): + instance = self._make_one("test-family", b"test-qualifier", b"test-value") + assert instance.timestamp_micros == expected_timestamp_micros + + def test__to_dict(self): + """ensure dict representation is as expected""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = 123456789 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["set_cell"] + got_inner_dict = got_dict["set_cell"] + assert got_inner_dict["family_name"] == expected_family + assert got_inner_dict["column_qualifier"] == expected_qualifier + assert got_inner_dict["timestamp_micros"] == expected_timestamp + assert got_inner_dict["value"] == expected_value + assert len(got_inner_dict.keys()) == 4 + + def test__to_dict_server_timestamp(self): + """test with server side timestamp -1 value""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = -1 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["set_cell"] + got_inner_dict = got_dict["set_cell"] + assert got_inner_dict["family_name"] == expected_family + assert got_inner_dict["column_qualifier"] == expected_qualifier + assert got_inner_dict["timestamp_micros"] == expected_timestamp + assert got_inner_dict["value"] == expected_value + assert len(got_inner_dict.keys()) == 4 + + def test__to_pb(self): + """ensure proto representation is as expected""" + import google.cloud.bigtable_v2.types.data as data_pb + + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = 123456789 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert got_pb.set_cell.family_name == expected_family + assert got_pb.set_cell.column_qualifier == expected_qualifier + assert got_pb.set_cell.timestamp_micros == expected_timestamp + assert got_pb.set_cell.value == expected_value + + def test__to_pb_server_timestamp(self): + """test with server side timestamp -1 value""" + import google.cloud.bigtable_v2.types.data as data_pb + + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = -1 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert got_pb.set_cell.family_name == expected_family + assert got_pb.set_cell.column_qualifier == expected_qualifier + assert got_pb.set_cell.timestamp_micros == expected_timestamp + assert got_pb.set_cell.value == expected_value + + 
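+    def test_int_value_round_trip(self):
+        """Illustrative sketch (hypothetical test, not part of the suite):
+        as asserted in test_ctor_int_value, int values are stored as 64-bit
+        big-endian signed bytes, so they round-trip through int.from_bytes."""
+        instance = self._make_one(family="f", qualifier=b"q", new_value=-42)
+        assert int.from_bytes(instance.new_value, "big", signed=True) == -42
+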
+    @pytest.mark.parametrize(
+        "timestamp,expected_value",
+        [
+            (1234567890, True),
+            (1, True),
+            (0, True),
+            (-1, False),
+            (None, True),
+        ],
+    )
+    def test_is_idempotent(self, timestamp, expected_value):
+        """is_idempotent is based on whether an explicit timestamp is set"""
+        instance = self._make_one(
+            "test-family", b"test-qualifier", b"test-value", timestamp
+        )
+        assert instance.is_idempotent() is expected_value
+
+    def test___str__(self):
+        """Str representation of mutations should be to_dict"""
+        instance = self._make_one(
+            "test-family", b"test-qualifier", b"test-value", 1234567890
+        )
+        str_value = instance.__str__()
+        dict_value = instance._to_dict()
+        assert str_value == str(dict_value)
+
+
+class TestDeleteRangeFromColumn:
+    def _target_class(self):
+        from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn
+
+        return DeleteRangeFromColumn
+
+    def _make_one(self, *args, **kwargs):
+        return self._target_class()(*args, **kwargs)
+
+    def test_ctor(self):
+        expected_family = "test-family"
+        expected_qualifier = b"test-qualifier"
+        expected_start = 1234567890
+        expected_end = 1234567891
+        instance = self._make_one(
+            expected_family, expected_qualifier, expected_start, expected_end
+        )
+        assert instance.family == expected_family
+        assert instance.qualifier == expected_qualifier
+        assert instance.start_timestamp_micros == expected_start
+        assert instance.end_timestamp_micros == expected_end
+
+    def test_ctor_no_timestamps(self):
+        expected_family = "test-family"
+        expected_qualifier = b"test-qualifier"
+        instance = self._make_one(expected_family, expected_qualifier)
+        assert instance.family == expected_family
+        assert instance.qualifier == expected_qualifier
+        assert instance.start_timestamp_micros is None
+        assert instance.end_timestamp_micros is None
+
+    def test_ctor_timestamps_out_of_order(self):
+        expected_family = "test-family"
+        expected_qualifier = b"test-qualifier"
+        expected_start = 10
+        expected_end = 1
+        with pytest.raises(ValueError) as excinfo:
+            self._make_one(
+                expected_family, expected_qualifier, expected_start, expected_end
+            )
+        assert "start_timestamp_micros must be <= end_timestamp_micros" in str(
+            excinfo.value
+        )
+
+    @pytest.mark.parametrize(
+        "start,end",
+        [
+            (0, 1),
+            (None, 1),
+            (0, None),
+        ],
+    )
+    def test__to_dict(self, start, end):
+        """ensure dict representation is as expected"""
+        expected_family = "test-family"
+        expected_qualifier = b"test-qualifier"
+
+        instance = self._make_one(expected_family, expected_qualifier, start, end)
+        got_dict = instance._to_dict()
+        assert list(got_dict.keys()) == ["delete_from_column"]
+        got_inner_dict = got_dict["delete_from_column"]
+        assert len(got_inner_dict.keys()) == 3
+        assert got_inner_dict["family_name"] == expected_family
+        assert got_inner_dict["column_qualifier"] == expected_qualifier
+        time_range_dict = got_inner_dict["time_range"]
+        expected_len = int(isinstance(start, int)) + int(isinstance(end, int))
+        assert len(time_range_dict.keys()) == expected_len
+        if start is not None:
+            assert time_range_dict["start_timestamp_micros"] == start
+        if end is not None:
+            assert time_range_dict["end_timestamp_micros"] == end
+
+    def test__to_pb(self):
+        """ensure proto representation is as expected"""
+        import google.cloud.bigtable_v2.types.data as data_pb
+
+        expected_family = "test-family"
+        expected_qualifier = b"test-qualifier"
+        instance = self._make_one(expected_family, expected_qualifier)
+        got_pb = instance._to_pb()
+        assert isinstance(got_pb, data_pb.Mutation)
+        assert got_pb.delete_from_column.family_name == expected_family
+        assert got_pb.delete_from_column.column_qualifier == expected_qualifier
+
+    def test_is_idempotent(self):
+        """is_idempotent is always true"""
+        instance = self._make_one(
+            "test-family", b"test-qualifier", 1234567890, 1234567891
+        )
+        assert instance.is_idempotent() is True
+
+    def test___str__(self):
+        """Str representation of mutations should be to_dict"""
+        instance = self._make_one("test-family", b"test-qualifier")
+        str_value = instance.__str__()
+        dict_value = instance._to_dict()
+        assert str_value == str(dict_value)
+
+
+class TestDeleteAllFromFamily:
+    def _target_class(self):
+        from google.cloud.bigtable.data.mutations import DeleteAllFromFamily
+
+        return DeleteAllFromFamily
+
+    def _make_one(self, *args, **kwargs):
+        return self._target_class()(*args, **kwargs)
+
+    def test_ctor(self):
+        expected_family = "test-family"
+        instance = self._make_one(expected_family)
+        assert instance.family_to_delete == expected_family
+
+    def test__to_dict(self):
+        """ensure dict representation is as expected"""
+        expected_family = "test-family"
+        instance = self._make_one(expected_family)
+        got_dict = instance._to_dict()
+        assert list(got_dict.keys()) == ["delete_from_family"]
+        got_inner_dict = got_dict["delete_from_family"]
+        assert len(got_inner_dict.keys()) == 1
+        assert got_inner_dict["family_name"] == expected_family
+
+    def test__to_pb(self):
+        """ensure proto representation is as expected"""
+        import google.cloud.bigtable_v2.types.data as data_pb
+
+        expected_family = "test-family"
+        instance = self._make_one(expected_family)
+        got_pb = instance._to_pb()
+        assert isinstance(got_pb, data_pb.Mutation)
+        assert got_pb.delete_from_family.family_name == expected_family
+
+    def test_is_idempotent(self):
+        """is_idempotent is always true"""
+        instance = self._make_one("test-family")
+        assert instance.is_idempotent() is True
+
+    def test___str__(self):
+        """Str representation of mutations should be to_dict"""
+        instance = self._make_one("test-family")
+        str_value = instance.__str__()
+        dict_value = instance._to_dict()
+        assert str_value == str(dict_value)
+
+
+class TestDeleteFromRow:
+    def _target_class(self):
+        from google.cloud.bigtable.data.mutations import DeleteAllFromRow
+
+        return DeleteAllFromRow
+
+    def _make_one(self, *args, **kwargs):
+        return self._target_class()(*args, **kwargs)
+
+    def test_ctor(self):
+        self._make_one()
+
+    def test__to_dict(self):
+        """ensure dict representation is as expected"""
+        instance = self._make_one()
+        got_dict = instance._to_dict()
+        assert list(got_dict.keys()) == ["delete_from_row"]
+        assert len(got_dict["delete_from_row"].keys()) == 0
+
+    def test__to_pb(self):
+        """ensure proto representation is as expected"""
+        import google.cloud.bigtable_v2.types.data as data_pb
+
+        instance = self._make_one()
+        got_pb = instance._to_pb()
+        assert isinstance(got_pb, data_pb.Mutation)
+        assert "delete_from_row" in str(got_pb)
+
+    def test_is_idempotent(self):
+        """is_idempotent is always true"""
+        instance = self._make_one()
+        assert instance.is_idempotent() is True
+
+    def test___str__(self):
+        """Str representation of mutations should be to_dict"""
+        instance = self._make_one()
+        assert instance.__str__() == "{'delete_from_row': {}}"
+
+
+class TestRowMutationEntry:
+    def _target_class(self):
+        from google.cloud.bigtable.data.mutations import RowMutationEntry
+
+        return RowMutationEntry
+
+    def _make_one(self, row_key, mutations):
+        return self._target_class()(row_key, mutations)
+
+    def test_ctor(self):
+        expected_key = b"row_key"
+        expected_mutations = [mock.Mock()]
+        instance = self._make_one(expected_key, expected_mutations)
+        assert instance.row_key == expected_key
+        assert list(instance.mutations) == expected_mutations
+
+    def test_ctor_over_limit(self):
+        """Should raise error if mutations exceed _MUTATE_ROWS_REQUEST_MUTATION_LIMIT"""
+        from google.cloud.bigtable.data.mutations import (
+            _MUTATE_ROWS_REQUEST_MUTATION_LIMIT,
+        )
+
+        assert _MUTATE_ROWS_REQUEST_MUTATION_LIMIT == 100_000
+        # no errors at limit
+        expected_mutations = [None for _ in range(_MUTATE_ROWS_REQUEST_MUTATION_LIMIT)]
+        self._make_one(b"row_key", expected_mutations)
+        # error if over limit
+        with pytest.raises(ValueError) as e:
+            self._make_one("key", expected_mutations + [mock.Mock()])
+        assert "entries must have <= 100000 mutations" in str(e.value)
+
+    def test_ctor_str_key(self):
+        expected_key = "row_key"
+        expected_mutations = [mock.Mock(), mock.Mock()]
+        instance = self._make_one(expected_key, expected_mutations)
+        assert instance.row_key == b"row_key"
+        assert list(instance.mutations) == expected_mutations
+
+    def test_ctor_single_mutation(self):
+        from google.cloud.bigtable.data.mutations import DeleteAllFromRow
+
+        expected_key = b"row_key"
+        expected_mutations = DeleteAllFromRow()
+        instance = self._make_one(expected_key, expected_mutations)
+        assert instance.row_key == expected_key
+        assert instance.mutations == (expected_mutations,)
+
+    def test__to_dict(self):
+        expected_key = "row_key"
+        mutation_mock = mock.Mock()
+        n_mutations = 3
+        expected_mutations = [mutation_mock for i in range(n_mutations)]
+        for mock_mutations in expected_mutations:
+            mock_mutations._to_dict.return_value = {"test": "data"}
+        instance = self._make_one(expected_key, expected_mutations)
+        expected_result = {
+            "row_key": b"row_key",
+            "mutations": [{"test": "data"}] * n_mutations,
+        }
+        assert instance._to_dict() == expected_result
+        assert mutation_mock._to_dict.call_count == n_mutations
+
+    def test__to_pb(self):
+        from google.cloud.bigtable_v2.types.bigtable import MutateRowsRequest
+        from google.cloud.bigtable_v2.types.data import Mutation
+
+        expected_key = "row_key"
+        mutation_mock = mock.Mock()
+        n_mutations = 3
+        expected_mutations = [mutation_mock for i in range(n_mutations)]
+        for mock_mutations in expected_mutations:
+            mock_mutations._to_pb.return_value = Mutation()
+        instance = self._make_one(expected_key, expected_mutations)
+        pb_result = instance._to_pb()
+        assert isinstance(pb_result, MutateRowsRequest.Entry)
+        assert pb_result.row_key == b"row_key"
+        assert pb_result.mutations == [Mutation()] * n_mutations
+        assert mutation_mock._to_pb.call_count == n_mutations
+
+    @pytest.mark.parametrize(
+        "mutations,result",
+        [
+            ([mock.Mock(is_idempotent=lambda: True)], True),
+            ([mock.Mock(is_idempotent=lambda: False)], False),
+            (
+                [
+                    mock.Mock(is_idempotent=lambda: True),
+                    mock.Mock(is_idempotent=lambda: False),
+                ],
+                False,
+            ),
+            (
+                [
+                    mock.Mock(is_idempotent=lambda: True),
+                    mock.Mock(is_idempotent=lambda: True),
+                ],
+                True,
+            ),
+        ],
+    )
+    def test_is_idempotent(self, mutations, result):
+        instance = self._make_one("row_key", mutations)
+        assert instance.is_idempotent() == result
+
+    def test_empty_mutations(self):
+        with pytest.raises(ValueError) as e:
+            self._make_one("row_key", [])
+        assert "must not be empty" in str(e.value)
+
+    @pytest.mark.parametrize("test_dict", [{}, {"key": "value"}])
+    def test_size(self, test_dict):
+        from sys import getsizeof
+
+        """Size should return size of dict
representation""" + self_mock = mock.Mock() + self_mock._to_dict.return_value = test_dict + size_value = self._target_class().size(self_mock) + assert size_value == getsizeof(test_dict) + + def test__from_dict_mock(self): + """ + test creating instance from entry dict, with mocked mutation._from_dict + """ + expected_key = b"row_key" + expected_mutations = [mock.Mock(), mock.Mock()] + input_dict = { + "row_key": expected_key, + "mutations": [{"test": "data"}, {"another": "data"}], + } + with mock.patch.object(mutations.Mutation, "_from_dict") as inner_from_dict: + inner_from_dict.side_effect = expected_mutations + instance = self._target_class()._from_dict(input_dict) + assert instance.row_key == b"row_key" + assert inner_from_dict.call_count == 2 + assert len(instance.mutations) == 2 + assert instance.mutations[0] == expected_mutations[0] + assert instance.mutations[1] == expected_mutations[1] + + def test__from_dict(self): + """ + test creating end-to-end with a real mutation instance + """ + input_dict = { + "row_key": b"row_key", + "mutations": [{"delete_from_family": {"family_name": "test_family"}}], + } + instance = self._target_class()._from_dict(input_dict) + assert instance.row_key == b"row_key" + assert len(instance.mutations) == 1 + assert isinstance(instance.mutations[0], mutations.DeleteAllFromFamily) + assert instance.mutations[0].family_to_delete == "test_family" + + +class TestAddToCell: + def _target_class(self): + from google.cloud.bigtable.data.mutations import AddToCell + + return AddToCell + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + @pytest.mark.parametrize("input_val", [2**64, -(2**64)]) + def test_ctor_large_int(self, input_val): + with pytest.raises(ValueError) as e: + self._make_one( + family="f", qualifier=b"b", value=input_val, timestamp_micros=123 + ) + assert "int values must be between" in str(e.value) + + @pytest.mark.parametrize("input_val", ["", "a", "abc", "hello world!"]) + def test_ctor_str_value(self, input_val): + with pytest.raises(TypeError) as e: + self._make_one( + family="f", qualifier=b"b", value=input_val, timestamp_micros=123 + ) + assert "value must be int" in str(e.value) + + def test_ctor(self): + """Ensure constructor sets expected values""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = 1234 + expected_timestamp = 1234567890 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.value == expected_value + assert instance.timestamp == expected_timestamp + + def test_ctor_negative_timestamp(self): + """Only non-negative timestamps are valid""" + with pytest.raises(ValueError) as e: + self._make_one("test-family", b"test-qualifier", 1234, -2) + assert "timestamp must be non-negative" in str(e.value) + + def test__to_dict(self): + """ensure dict representation is as expected""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = 1234 + expected_timestamp = 123456789 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["add_to_cell"] + got_inner_dict = got_dict["add_to_cell"] + assert got_inner_dict["family_name"] == expected_family + assert got_inner_dict["column_qualifier"]["raw_value"] == expected_qualifier + assert 
got_inner_dict["timestamp"]["raw_timestamp_micros"] == expected_timestamp + assert got_inner_dict["input"]["int_value"] == expected_value + assert len(got_inner_dict.keys()) == 4 + + def test__to_pb(self): + """ensure proto representation is as expected""" + import google.cloud.bigtable_v2.types.data as data_pb + + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = 1234 + expected_timestamp = 123456789 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert got_pb.add_to_cell.family_name == expected_family + assert got_pb.add_to_cell.column_qualifier.raw_value == expected_qualifier + assert got_pb.add_to_cell.timestamp.raw_timestamp_micros == expected_timestamp + assert got_pb.add_to_cell.input.int_value == expected_value + + @pytest.mark.parametrize( + "timestamp", + [ + (1234567890), + (1), + (0), + ], + ) + def test_is_idempotent(self, timestamp): + """is_idempotent is not based on the timestamp""" + instance = self._make_one("test-family", b"test-qualifier", 1234, timestamp) + assert not instance.is_idempotent() + + def test___str__(self): + """Str representation of mutations should be to_dict""" + instance = self._make_one("test-family", b"test-qualifier", 1234, 1234567890) + str_value = instance.__str__() + dict_value = instance._to_dict() + assert str_value == str(dict_value) diff --git a/tests/unit/data/test_read_modify_write_rules.py b/tests/unit/data/test_read_modify_write_rules.py new file mode 100644 index 000000000..1f67da13b --- /dev/null +++ b/tests/unit/data/test_read_modify_write_rules.py @@ -0,0 +1,186 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import pytest + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + + +class TestBaseReadModifyWriteRule: + def _target_class(self): + from google.cloud.bigtable.data.read_modify_write_rules import ( + ReadModifyWriteRule, + ) + + return ReadModifyWriteRule + + def test_abstract(self): + """should not be able to instantiate""" + with pytest.raises(TypeError): + self._target_class()(family="foo", qualifier=b"bar") + + def test__to_dict(self): + """ + to_dict not implemented in base class + """ + with pytest.raises(NotImplementedError): + self._target_class()._to_dict(mock.Mock()) + + +class TestIncrementRule: + def _target_class(self): + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + return IncrementRule + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", 1), ("fam", b"qual", 1)), + (("fam", b"qual", -12), ("fam", b"qual", -12)), + (("fam", "qual", 1), ("fam", b"qual", 1)), + (("fam", "qual", 0), ("fam", b"qual", 0)), + (("", "", 0), ("", b"", 0)), + (("f", b"q"), ("f", b"q", 1)), + ], + ) + def test_ctor(self, args, expected): + instance = self._target_class()(*args) + assert instance.family == expected[0] + assert instance.qualifier == expected[1] + assert instance.increment_amount == expected[2] + + @pytest.mark.parametrize("input_amount", [1.1, None, "1", object(), "", b"", b"1"]) + def test_ctor_bad_input(self, input_amount): + with pytest.raises(TypeError) as e: + self._target_class()("fam", b"qual", input_amount) + assert "increment_amount must be an integer" in str(e.value) + + @pytest.mark.parametrize( + "large_value", [2**64, 2**64 + 1, -(2**64), -(2**64) - 1] + ) + def test_ctor_large_values(self, large_value): + with pytest.raises(ValueError) as e: + self._target_class()("fam", b"qual", large_value) + assert "too large" in str(e.value) + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", 1), ("fam", b"qual", 1)), + (("fam", b"qual", -12), ("fam", b"qual", -12)), + (("fam", "qual", 1), ("fam", b"qual", 1)), + (("fam", "qual", 0), ("fam", b"qual", 0)), + (("", "", 0), ("", b"", 0)), + (("f", b"q"), ("f", b"q", 1)), + ], + ) + def test__to_dict(self, args, expected): + instance = self._target_class()(*args) + expected = { + "family_name": expected[0], + "column_qualifier": expected[1], + "increment_amount": expected[2], + } + assert instance._to_dict() == expected + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", 1), ("fam", b"qual", 1)), + (("fam", b"qual", -12), ("fam", b"qual", -12)), + (("fam", "qual", 1), ("fam", b"qual", 1)), + (("fam", "qual", 0), ("fam", b"qual", 0)), + (("", "", 0), ("", b"", 0)), + (("f", b"q"), ("f", b"q", 1)), + ], + ) + def test__to_pb(self, args, expected): + import google.cloud.bigtable_v2.types.data as data_pb + + instance = self._target_class()(*args) + pb_result = instance._to_pb() + assert isinstance(pb_result, data_pb.ReadModifyWriteRule) + assert pb_result.family_name == expected[0] + assert pb_result.column_qualifier == expected[1] + assert pb_result.increment_amount == expected[2] + + +class TestAppendValueRule: + def _target_class(self): + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + return AppendValueRule + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", b"val"), ("fam", b"qual", b"val")), + (("fam", "qual", b"val"), ("fam", b"qual", b"val")), + (("", "", b""), ("", b"", 
b"")), + (("f", "q", "str_val"), ("f", b"q", b"str_val")), + (("f", "q", ""), ("f", b"q", b"")), + ], + ) + def test_ctor(self, args, expected): + instance = self._target_class()(*args) + assert instance.family == expected[0] + assert instance.qualifier == expected[1] + assert instance.append_value == expected[2] + + @pytest.mark.parametrize("input_val", [5, 1.1, None, object()]) + def test_ctor_bad_input(self, input_val): + with pytest.raises(TypeError) as e: + self._target_class()("fam", b"qual", input_val) + assert "append_value must be bytes or str" in str(e.value) + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", b"val"), ("fam", b"qual", b"val")), + (("fam", "qual", b"val"), ("fam", b"qual", b"val")), + (("", "", b""), ("", b"", b"")), + ], + ) + def test__to_dict(self, args, expected): + instance = self._target_class()(*args) + expected = { + "family_name": expected[0], + "column_qualifier": expected[1], + "append_value": expected[2], + } + assert instance._to_dict() == expected + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", b"val"), ("fam", b"qual", b"val")), + (("fam", "qual", b"val"), ("fam", b"qual", b"val")), + (("", "", b""), ("", b"", b"")), + ], + ) + def test__to_pb(self, args, expected): + import google.cloud.bigtable_v2.types.data as data_pb + + instance = self._target_class()(*args) + pb_result = instance._to_pb() + assert isinstance(pb_result, data_pb.ReadModifyWriteRule) + assert pb_result.family_name == expected[0] + assert pb_result.column_qualifier == expected[1] + assert pb_result.append_value == expected[2] diff --git a/tests/unit/data/test_read_rows_query.py b/tests/unit/data/test_read_rows_query.py new file mode 100644 index 000000000..ba3b0468b --- /dev/null +++ b/tests/unit/data/test_read_rows_query.py @@ -0,0 +1,589 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +TEST_ROWS = [ + "row_key_1", + b"row_key_2", +] + + +class TestRowRange: + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data.read_rows_query import RowRange + + return RowRange + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor_start_end(self): + row_range = self._make_one("test_row", "test_row2") + assert row_range.start_key == "test_row".encode() + assert row_range.end_key == "test_row2".encode() + assert row_range.start_is_inclusive is True + assert row_range.end_is_inclusive is False + + def test_ctor_start_only(self): + row_range = self._make_one("test_row3") + assert row_range.start_key == "test_row3".encode() + assert row_range.start_is_inclusive is True + assert row_range.end_key is None + assert row_range.end_is_inclusive is True + + def test_ctor_end_only(self): + row_range = self._make_one(end_key="test_row4") + assert row_range.end_key == "test_row4".encode() + assert row_range.end_is_inclusive is False + assert row_range.start_key is None + assert row_range.start_is_inclusive is True + + def test_ctor_empty_strings(self): + """ + empty strings should be treated as None + """ + row_range = self._make_one("", "") + assert row_range.start_key is None + assert row_range.end_key is None + assert row_range.start_is_inclusive is True + assert row_range.end_is_inclusive is True + + def test_ctor_inclusive_flags(self): + row_range = self._make_one("test_row5", "test_row6", False, True) + assert row_range.start_key == "test_row5".encode() + assert row_range.end_key == "test_row6".encode() + assert row_range.start_is_inclusive is False + assert row_range.end_is_inclusive is True + + def test_ctor_defaults(self): + row_range = self._make_one() + assert row_range.start_key is None + assert row_range.end_key is None + + def test_ctor_invalid_keys(self): + # test with invalid keys + with pytest.raises(ValueError) as exc: + self._make_one(1, "2") + assert str(exc.value) == "start_key must be a string or bytes" + with pytest.raises(ValueError) as exc: + self._make_one("1", 2) + assert str(exc.value) == "end_key must be a string or bytes" + with pytest.raises(ValueError) as exc: + self._make_one("2", "1") + assert str(exc.value) == "start_key must be less than or equal to end_key" + + @pytest.mark.parametrize( + "dict_repr,expected", + [ + ({"start_key_closed": "test_row", "end_key_open": "test_row2"}, True), + ({"start_key_closed": b"test_row", "end_key_open": b"test_row2"}, True), + ({"start_key_open": "test_row", "end_key_closed": "test_row2"}, True), + ({"start_key_open": b"a"}, True), + ({"end_key_closed": b"b"}, True), + ({"start_key_closed": "a"}, True), + ({"end_key_open": b"b"}, True), + ({}, False), + ], + ) + def test___bool__(self, dict_repr, expected): + """ + Only row range with both points empty should be falsy + """ + from google.cloud.bigtable.data.read_rows_query import RowRange + + row_range = RowRange._from_dict(dict_repr) + assert bool(row_range) is expected + + def test__eq__(self): + """ + test that row ranges can be compared for equality + """ + from google.cloud.bigtable.data.read_rows_query import RowRange + + range1 = RowRange("1", "2") + range1_dup = RowRange("1", "2") + range2 = RowRange("1", "3") + range_w_empty = RowRange(None, "2") + assert range1 == range1_dup + assert range1 != range2 + assert range1 != range_w_empty + range_1_w_inclusive_start = RowRange("1", "2", start_is_inclusive=True) + range_1_w_exclusive_start = RowRange("1", "2", 
start_is_inclusive=False) + range_1_w_inclusive_end = RowRange("1", "2", end_is_inclusive=True) + range_1_w_exclusive_end = RowRange("1", "2", end_is_inclusive=False) + assert range1 == range_1_w_inclusive_start + assert range1 == range_1_w_exclusive_end + assert range1 != range_1_w_exclusive_start + assert range1 != range_1_w_inclusive_end + + @pytest.mark.parametrize( + "dict_repr,expected", + [ + ( + {"start_key_closed": "test_row", "end_key_open": "test_row2"}, + "[b'test_row', b'test_row2')", + ), + ( + {"start_key_open": "test_row", "end_key_closed": "test_row2"}, + "(b'test_row', b'test_row2']", + ), + ({"start_key_open": b"a"}, "(b'a', +inf]"), + ({"end_key_closed": b"b"}, "[-inf, b'b']"), + ({"end_key_open": b"b"}, "[-inf, b'b')"), + ({}, "[-inf, +inf]"), + ], + ) + def test___str__(self, dict_repr, expected): + """ + test string representations of row ranges + """ + from google.cloud.bigtable.data.read_rows_query import RowRange + + row_range = RowRange._from_dict(dict_repr) + assert str(row_range) == expected + + @pytest.mark.parametrize( + "dict_repr,expected", + [ + ( + {"start_key_closed": "test_row", "end_key_open": "test_row2"}, + "RowRange(start_key=b'test_row', end_key=b'test_row2')", + ), + ( + {"start_key_open": "test_row", "end_key_closed": "test_row2"}, + "RowRange(start_key=b'test_row', end_key=b'test_row2', start_is_inclusive=False, end_is_inclusive=True)", + ), + ( + {"start_key_open": b"a"}, + "RowRange(start_key=b'a', end_key=None, start_is_inclusive=False)", + ), + ( + {"end_key_closed": b"b"}, + "RowRange(start_key=None, end_key=b'b', end_is_inclusive=True)", + ), + ({"end_key_open": b"b"}, "RowRange(start_key=None, end_key=b'b')"), + ({}, "RowRange(start_key=None, end_key=None)"), + ], + ) + def test___repr__(self, dict_repr, expected): + """ + test repr representations of row ranges + """ + from google.cloud.bigtable.data.read_rows_query import RowRange + + row_range = RowRange._from_dict(dict_repr) + assert repr(row_range) == expected + + +class TestReadRowsQuery: + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + return ReadRowsQuery + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor_defaults(self): + query = self._make_one() + assert query.row_keys == list() + assert query.row_ranges == list() + assert query.filter is None + assert query.limit is None + + def test_ctor_explicit(self): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.read_rows_query import RowRange + + filter_ = RowFilterChain() + query = self._make_one( + ["row_key_1", "row_key_2"], + row_ranges=[RowRange("row_key_3", "row_key_4")], + limit=10, + row_filter=filter_, + ) + assert len(query.row_keys) == 2 + assert "row_key_1".encode() in query.row_keys + assert "row_key_2".encode() in query.row_keys + assert len(query.row_ranges) == 1 + assert RowRange("row_key_3", "row_key_4") in query.row_ranges + assert query.filter == filter_ + assert query.limit == 10 + + def test_ctor_invalid_limit(self): + with pytest.raises(ValueError) as exc: + self._make_one(limit=-1) + assert str(exc.value) == "limit must be >= 0" + + def test_set_filter(self): + from google.cloud.bigtable.data.row_filters import RowFilterChain + + filter1 = RowFilterChain() + query = self._make_one() + assert query.filter is None + query.filter = filter1 + assert query.filter == filter1 + filter2 = RowFilterChain() + query.filter = filter2 + assert 
query.filter == filter2 + query.filter = None + assert query.filter is None + query.filter = RowFilterChain() + assert query.filter == RowFilterChain() + + def test_set_limit(self): + query = self._make_one() + assert query.limit is None + query.limit = 10 + assert query.limit == 10 + query.limit = 9 + assert query.limit == 9 + query.limit = 0 + assert query.limit is None + with pytest.raises(ValueError) as exc: + query.limit = -1 + assert str(exc.value) == "limit must be >= 0" + with pytest.raises(ValueError) as exc: + query.limit = -100 + assert str(exc.value) == "limit must be >= 0" + + def test_add_key_str(self): + query = self._make_one() + assert query.row_keys == list() + input_str = "test_row" + query.add_key(input_str) + assert len(query.row_keys) == 1 + assert input_str.encode() in query.row_keys + input_str2 = "test_row2" + query.add_key(input_str2) + assert len(query.row_keys) == 2 + assert input_str.encode() in query.row_keys + assert input_str2.encode() in query.row_keys + + def test_add_key_bytes(self): + query = self._make_one() + assert query.row_keys == list() + input_bytes = b"test_row" + query.add_key(input_bytes) + assert len(query.row_keys) == 1 + assert input_bytes in query.row_keys + input_bytes2 = b"test_row2" + query.add_key(input_bytes2) + assert len(query.row_keys) == 2 + assert input_bytes in query.row_keys + assert input_bytes2 in query.row_keys + + def test_add_rows_batch(self): + query = self._make_one() + assert query.row_keys == list() + input_batch = ["test_row", b"test_row2", "test_row3"] + for k in input_batch: + query.add_key(k) + assert len(query.row_keys) == 3 + assert b"test_row" in query.row_keys + assert b"test_row2" in query.row_keys + assert b"test_row3" in query.row_keys + # test adding another batch + for k in ["test_row4", b"test_row5"]: + query.add_key(k) + assert len(query.row_keys) == 5 + assert input_batch[0].encode() in query.row_keys + assert input_batch[1] in query.row_keys + assert input_batch[2].encode() in query.row_keys + assert b"test_row4" in query.row_keys + assert b"test_row5" in query.row_keys + + def test_add_key_invalid(self): + query = self._make_one() + with pytest.raises(ValueError) as exc: + query.add_key(1) + assert str(exc.value) == "row_key must be string or bytes" + with pytest.raises(ValueError) as exc: + query.add_key(["s"]) + assert str(exc.value) == "row_key must be string or bytes" + + def test_add_range(self): + from google.cloud.bigtable.data.read_rows_query import RowRange + + query = self._make_one() + assert query.row_ranges == list() + input_range = RowRange(start_key=b"test_row") + query.add_range(input_range) + assert len(query.row_ranges) == 1 + assert input_range in query.row_ranges + input_range2 = RowRange(start_key=b"test_row2") + query.add_range(input_range2) + assert len(query.row_ranges) == 2 + assert input_range in query.row_ranges + assert input_range2 in query.row_ranges + + def _parse_query_string(self, query_string): + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery, RowRange + + query = ReadRowsQuery() + segments = query_string.split(",") + for segment in segments: + if "-" in segment: + start, end = segment.split("-") + s_open, e_open = True, True + if start == "": + start = None + s_open = None + else: + if start[0] == "(": + s_open = False + start = start[1:] + if end == "": + end = None + e_open = None + else: + if end[-1] == ")": + e_open = False + end = end[:-1] + query.add_range(RowRange(start, end, s_open, e_open)) + else: + query.add_key(segment) + return query 
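+    # Grammar accepted by _parse_query_string, for the sharding tests below:
+    # segments are comma-separated; a segment containing "-" becomes a
+    # RowRange whose non-empty endpoints carry a bracket ("[x" / "x]" for
+    # inclusive, "(x" / "x)" for exclusive; an empty side means unbounded),
+    # and any other segment is treated as a plain row key.
+    # e.g. "k,(a-b]" is the key b"k" plus the range (a, b].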
+ + @pytest.mark.parametrize( + "query_string,shard_points", + [ + ("a,[p-q)", []), + ("0_key,[1_range_start-2_range_end)", ["3_split"]), + ("0_key,[1_range_start-2_range_end)", ["2_range_end"]), + ("0_key,[1_range_start-2_range_end]", ["2_range_end"]), + ("-1_range_end)", ["5_split"]), + ("8_key,(1_range_start-2_range_end]", ["1_range_start"]), + ("9_row_key,(5_range_start-7_range_end)", ["3_split"]), + ("3_row_key,(5_range_start-7_range_end)", ["2_row_key"]), + ("4_split,4_split,(3_split-5_split]", ["3_split", "5_split"]), + ("(3_split-", ["3_split"]), + ], + ) + def test_shard_no_split(self, query_string, shard_points): + """ + Test sharding with a set of queries that should not result in any splits. + """ + initial_query = self._parse_query_string(query_string) + row_samples = [(point.encode(), None) for point in shard_points] + sharded_queries = initial_query.shard(row_samples) + assert len(sharded_queries) == 1 + assert initial_query == sharded_queries[0] + + def test_shard_full_table_scan_empty_split(self): + """ + Sharding a full table scan with no split should return another full table scan. + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + full_scan_query = ReadRowsQuery() + split_points = [] + sharded_queries = full_scan_query.shard(split_points) + assert len(sharded_queries) == 1 + result_query = sharded_queries[0] + assert result_query == full_scan_query + + def test_shard_full_table_scan_with_split(self): + """ + Test splitting a full table scan into two queries + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + full_scan_query = ReadRowsQuery() + split_points = [(b"a", None)] + sharded_queries = full_scan_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string("-a]") + assert sharded_queries[1] == self._parse_query_string("(a-") + + def test_shard_full_table_scan_with_multiple_split(self): + """ + Test splitting a full table scan into three queries + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + full_scan_query = ReadRowsQuery() + split_points = [(b"a", None), (b"z", None)] + sharded_queries = full_scan_query.shard(split_points) + assert len(sharded_queries) == 3 + assert sharded_queries[0] == self._parse_query_string("-a]") + assert sharded_queries[1] == self._parse_query_string("(a-z]") + assert sharded_queries[2] == self._parse_query_string("(z-") + + def test_shard_multiple_keys(self): + """ + Test splitting multiple individual keys into separate queries + """ + initial_query = self._parse_query_string("1_beforeSplit,2_onSplit,3_afterSplit") + split_points = [(b"2_onSplit", None)] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string("1_beforeSplit,2_onSplit") + assert sharded_queries[1] == self._parse_query_string("3_afterSplit") + + def test_shard_keys_empty_left(self): + """ + Test with the left-most split point empty + """ + initial_query = self._parse_query_string("5_test,8_test") + split_points = [(b"0_split", None), (b"6_split", None)] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string("5_test") + assert sharded_queries[1] == self._parse_query_string("8_test") + + def test_shard_keys_empty_right(self): + """ + Test with the right-most split point empty + """ + initial_query = self._parse_query_string("0_test,2_test") + 
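+        # both keys sort before b"5_split", so the right-most segment is
+        # empty and no shard is produced for it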
split_points = [(b"1_split", None), (b"5_split", None)] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string("0_test") + assert sharded_queries[1] == self._parse_query_string("2_test") + + def test_shard_mixed_split(self): + """ + Test splitting a complex query with multiple split points + """ + initial_query = self._parse_query_string("0,a,c,-a],-b],(c-e],(d-f],(m-") + split_points = [(s.encode(), None) for s in ["a", "d", "j", "o"]] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 5 + assert sharded_queries[0] == self._parse_query_string("0,a,-a]") + assert sharded_queries[1] == self._parse_query_string("c,(a-b],(c-d]") + assert sharded_queries[2] == self._parse_query_string("(d-e],(d-f]") + assert sharded_queries[3] == self._parse_query_string("(m-o]") + assert sharded_queries[4] == self._parse_query_string("(o-") + + def test_shard_unsorted_request(self): + """ + Test with a query that contains rows and queries in a random order + """ + initial_query = self._parse_query_string( + "7_row_key_1,2_row_key_2,[8_range_1_start-9_range_1_end),[3_range_2_start-4_range_2_end)" + ) + split_points = [(b"5-split", None)] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string( + "2_row_key_2,[3_range_2_start-4_range_2_end)" + ) + assert sharded_queries[1] == self._parse_query_string( + "7_row_key_1,[8_range_1_start-9_range_1_end)" + ) + + @pytest.mark.parametrize( + "query_string,shard_points", + [ + ("a,[p-q)", []), + ("0_key,[1_range_start-2_range_end)", ["3_split"]), + ("-1_range_end)", ["5_split"]), + ("0_key,[1_range_start-2_range_end)", ["2_range_end"]), + ("9_row_key,(5_range_start-7_range_end)", ["3_split"]), + ("(5_range_start-", ["3_split"]), + ("3_split,[3_split-5_split)", ["3_split", "5_split"]), + ("[3_split-", ["3_split"]), + ("", []), + ("", ["3_split"]), + ("", ["3_split", "5_split"]), + ("1,2,3,4,5,6,7,8,9", ["3_split"]), + ], + ) + def test_shard_keeps_filter(self, query_string, shard_points): + """ + sharded queries should keep the filter from the original query + """ + initial_query = self._parse_query_string(query_string) + expected_filter = {"test": "filter"} + initial_query.filter = expected_filter + row_samples = [(point.encode(), None) for point in shard_points] + sharded_queries = initial_query.shard(row_samples) + assert len(sharded_queries) > 0 + for query in sharded_queries: + assert query.filter == expected_filter + + def test_shard_limit_exception(self): + """ + queries with a limit should raise an exception when a shard is attempted + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + query = ReadRowsQuery(limit=10) + with pytest.raises(AttributeError) as e: + query.shard([]) + assert "Cannot shard query with a limit" in str(e.value) + + @pytest.mark.parametrize( + "first_args,second_args,expected", + [ + ((), (), True), + ((), ("a",), False), + (("a",), (), False), + (("a",), ("a",), True), + ((["a"],), (["a", "b"],), False), + ((["a", "b"],), (["a", "b"],), True), + ((["a", b"b"],), ([b"a", "b"],), True), + (("a",), (b"a",), True), + (("a",), ("b",), False), + (("a",), ("a", ["b"]), False), + (("a", "b"), ("a", ["b"]), True), + (("a", ["b"]), ("a", ["b", "c"]), False), + (("a", ["b", "c"]), ("a", [b"b", "c"]), True), + (("a", ["b", "c"], 1), ("a", ["b", b"c"], 1), True), + (("a", ["b"], 1), ("a", ["b"], 2), False), + 
(("a", ["b"], 1, {"a": "b"}), ("a", ["b"], 1, {"a": "b"}), True), + (("a", ["b"], 1, {"a": "b"}), ("a", ["b"], 1), False), + ( + (), + (None, [None], None, None), + True, + ), # empty query is equal to empty row range + ((), (None, [None], 1, None), False), + ((), (None, [None], None, {"a": "b"}), False), + ], + ) + def test___eq__(self, first_args, second_args, expected): + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import RowRange + + # replace row_range placeholders with a RowRange object + if len(first_args) > 1: + first_args = list(first_args) + first_args[1] = [RowRange(c) for c in first_args[1]] + if len(second_args) > 1: + second_args = list(second_args) + second_args[1] = [RowRange(c) for c in second_args[1]] + first = ReadRowsQuery(*first_args) + second = ReadRowsQuery(*second_args) + assert (first == second) == expected + + def test___repr__(self): + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + instance = self._make_one(row_keys=["a", "b"], row_filter={}, limit=10) + # should be able to recreate the instance from the repr + repr_str = repr(instance) + recreated = eval(repr_str) + assert isinstance(recreated, ReadRowsQuery) + assert recreated == instance + + def test_empty_row_set(self): + """Empty strings should be treated as keys inputs""" + query = self._make_one(row_keys="") + assert query.row_keys == [b""] diff --git a/tests/unit/data/test_row.py b/tests/unit/data/test_row.py new file mode 100644 index 000000000..10b5bdb23 --- /dev/null +++ b/tests/unit/data/test_row.py @@ -0,0 +1,718 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import time + +TEST_VALUE = b"1234" +TEST_ROW_KEY = b"row" +TEST_FAMILY_ID = "cf1" +TEST_QUALIFIER = b"col" +TEST_TIMESTAMP = time.time_ns() // 1000 +TEST_LABELS = ["label1", "label2"] + + +class TestRow(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data.row import Row + + return Row + + def _make_one(self, *args, **kwargs): + if len(args) == 0: + args = (TEST_ROW_KEY, [self._make_cell()]) + return self._get_target_class()(*args, **kwargs) + + def _make_cell( + self, + value=TEST_VALUE, + row_key=TEST_ROW_KEY, + family_id=TEST_FAMILY_ID, + qualifier=TEST_QUALIFIER, + timestamp=TEST_TIMESTAMP, + labels=TEST_LABELS, + ): + from google.cloud.bigtable.data.row import Cell + + return Cell(value, row_key, family_id, qualifier, timestamp, labels) + + def test_ctor(self): + cells = [self._make_cell(), self._make_cell()] + row_response = self._make_one(TEST_ROW_KEY, cells) + self.assertEqual(list(row_response), cells) + self.assertEqual(row_response.row_key, TEST_ROW_KEY) + + def test__from_pb(self): + """ + Construct from protobuf. 
+ """ + from google.cloud.bigtable_v2.types import Row as RowPB + from google.cloud.bigtable_v2.types import Family as FamilyPB + from google.cloud.bigtable_v2.types import Column as ColumnPB + from google.cloud.bigtable_v2.types import Cell as CellPB + + row_key = b"row_key" + cells = [ + CellPB( + value=str(i).encode(), + timestamp_micros=TEST_TIMESTAMP, + labels=TEST_LABELS, + ) + for i in range(2) + ] + column = ColumnPB(qualifier=TEST_QUALIFIER, cells=cells) + families_pb = [FamilyPB(name=TEST_FAMILY_ID, columns=[column])] + row_pb = RowPB(key=row_key, families=families_pb) + output = self._get_target_class()._from_pb(row_pb) + self.assertEqual(output.row_key, row_key) + self.assertEqual(len(output), 2) + self.assertEqual(output[0].value, b"0") + self.assertEqual(output[1].value, b"1") + self.assertEqual(output[0].timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(output[0].labels, TEST_LABELS) + assert output[0].row_key == row_key + assert output[0].family == TEST_FAMILY_ID + assert output[0].qualifier == TEST_QUALIFIER + + def test__from_pb_sparse(self): + """ + Construct from minimal protobuf. + """ + from google.cloud.bigtable_v2.types import Row as RowPB + + row_key = b"row_key" + row_pb = RowPB(key=row_key) + output = self._get_target_class()._from_pb(row_pb) + self.assertEqual(output.row_key, row_key) + self.assertEqual(len(output), 0) + + def test_get_cells(self): + cell_list = [] + for family_id in ["1", "2"]: + for qualifier in [b"a", b"b"]: + cell = self._make_cell(family_id=family_id, qualifier=qualifier) + cell_list.append(cell) + # test getting all cells + row_response = self._make_one(TEST_ROW_KEY, cell_list) + self.assertEqual(row_response.get_cells(), cell_list) + # test getting cells in a family + output = row_response.get_cells(family="1") + self.assertEqual(len(output), 2) + self.assertEqual(output[0].family, "1") + self.assertEqual(output[1].family, "1") + self.assertEqual(output[0], cell_list[0]) + # test getting cells in a family/qualifier + # should accept bytes or str for qualifier + for q in [b"a", "a"]: + output = row_response.get_cells(family="1", qualifier=q) + self.assertEqual(len(output), 1) + self.assertEqual(output[0].family, "1") + self.assertEqual(output[0].qualifier, b"a") + self.assertEqual(output[0], cell_list[0]) + # calling with just qualifier should raise an error + with self.assertRaises(ValueError): + row_response.get_cells(qualifier=b"a") + # test calling with bad family or qualifier + with self.assertRaises(ValueError): + row_response.get_cells(family="3", qualifier=b"a") + with self.assertRaises(ValueError): + row_response.get_cells(family="3") + with self.assertRaises(ValueError): + row_response.get_cells(family="1", qualifier=b"c") + + def test___repr__(self): + cell_str = ( + "{'value': b'1234', 'timestamp_micros': %d, 'labels': ['label1', 'label2']}" + % (TEST_TIMESTAMP) + ) + expected_prefix = "Row(key=b'row', cells=" + row = self._make_one(TEST_ROW_KEY, [self._make_cell()]) + self.assertIn(expected_prefix, repr(row)) + self.assertIn(cell_str, repr(row)) + expected_full = ( + "Row(key=b'row', cells={\n ('cf1', b'col'): [{'value': b'1234', 'timestamp_micros': %d, 'labels': ['label1', 'label2']}],\n})" + % (TEST_TIMESTAMP) + ) + self.assertEqual(expected_full, repr(row)) + # try with multiple cells + row = self._make_one(TEST_ROW_KEY, [self._make_cell(), self._make_cell()]) + self.assertIn(expected_prefix, repr(row)) + self.assertIn(cell_str, repr(row)) + + def test___str__(self): + cells = [ + self._make_cell(value=b"1234", 
family_id="1", qualifier=b"col"), + self._make_cell(value=b"5678", family_id="3", qualifier=b"col"), + self._make_cell(value=b"1", family_id="3", qualifier=b"col"), + self._make_cell(value=b"2", family_id="3", qualifier=b"col"), + ] + + row_response = self._make_one(TEST_ROW_KEY, cells) + expected = ( + "{\n" + + " (family='1', qualifier=b'col'): [b'1234'],\n" + + " (family='3', qualifier=b'col'): [b'5678', (+2 more)],\n" + + "}" + ) + self.assertEqual(expected, str(row_response)) + + def test_to_dict(self): + from google.cloud.bigtable_v2.types import Row + + cell1 = self._make_cell() + cell2 = self._make_cell() + cell2.value = b"other" + row = self._make_one(TEST_ROW_KEY, [cell1, cell2]) + row_dict = row._to_dict() + expected_dict = { + "key": TEST_ROW_KEY, + "families": [ + { + "name": TEST_FAMILY_ID, + "columns": [ + { + "qualifier": TEST_QUALIFIER, + "cells": [ + { + "value": TEST_VALUE, + "timestamp_micros": TEST_TIMESTAMP, + "labels": TEST_LABELS, + }, + { + "value": b"other", + "timestamp_micros": TEST_TIMESTAMP, + "labels": TEST_LABELS, + }, + ], + } + ], + }, + ], + } + self.assertEqual(len(row_dict), len(expected_dict)) + for key, value in expected_dict.items(): + self.assertEqual(row_dict[key], value) + # should be able to construct a Cell proto from the dict + row_proto = Row(**row_dict) + self.assertEqual(row_proto.key, TEST_ROW_KEY) + self.assertEqual(len(row_proto.families), 1) + family = row_proto.families[0] + self.assertEqual(family.name, TEST_FAMILY_ID) + self.assertEqual(len(family.columns), 1) + column = family.columns[0] + self.assertEqual(column.qualifier, TEST_QUALIFIER) + self.assertEqual(len(column.cells), 2) + self.assertEqual(column.cells[0].value, TEST_VALUE) + self.assertEqual(column.cells[0].timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(column.cells[0].labels, TEST_LABELS) + self.assertEqual(column.cells[1].value, cell2.value) + self.assertEqual(column.cells[1].timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(column.cells[1].labels, TEST_LABELS) + + def test_iteration(self): + from google.cloud.bigtable.data.row import Cell + + # should be able to iterate over the Row as a list + cell1 = self._make_cell(value=b"1") + cell2 = self._make_cell(value=b"2") + cell3 = self._make_cell(value=b"3") + row_response = self._make_one(TEST_ROW_KEY, [cell1, cell2, cell3]) + self.assertEqual(len(row_response), 3) + result_list = list(row_response) + self.assertEqual(len(result_list), 3) + # should be able to iterate over all cells + idx = 0 + for cell in row_response: + self.assertIsInstance(cell, Cell) + self.assertEqual(cell.value, result_list[idx].value) + self.assertEqual(cell.value, str(idx + 1).encode()) + idx += 1 + + def test_contains_cell(self): + cell3 = self._make_cell(value=b"3") + cell1 = self._make_cell(value=b"1") + cell2 = self._make_cell(value=b"2") + cell4 = self._make_cell(value=b"4") + row_response = self._make_one(TEST_ROW_KEY, [cell3, cell1, cell2]) + self.assertIn(cell1, row_response) + self.assertIn(cell2, row_response) + self.assertNotIn(cell4, row_response) + cell3_copy = self._make_cell(value=b"3") + self.assertIn(cell3_copy, row_response) + + def test_contains_family_id(self): + new_family_id = "new_family_id" + cell = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell2 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + new_family_id, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + row_response = self._make_one(TEST_ROW_KEY, [cell, cell2]) + 
self.assertIn(TEST_FAMILY_ID, row_response)
+ self.assertIn("new_family_id", row_response)
+ self.assertIn(new_family_id, row_response)
+ self.assertNotIn("not_a_family_id", row_response)
+ self.assertNotIn(None, row_response)
+
+ def test_contains_family_qualifier_tuple(self):
+ new_family_id = "new_family_id"
+ new_qualifier = b"new_qualifier"
+ cell = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ cell2 = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ new_family_id,
+ new_qualifier,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ row_response = self._make_one(TEST_ROW_KEY, [cell, cell2])
+ self.assertIn((TEST_FAMILY_ID, TEST_QUALIFIER), row_response)
+ self.assertIn(("new_family_id", "new_qualifier"), row_response)
+ self.assertIn(("new_family_id", b"new_qualifier"), row_response)
+ self.assertIn((new_family_id, new_qualifier), row_response)
+
+ self.assertNotIn(("not_a_family_id", TEST_QUALIFIER), row_response)
+ self.assertNotIn((TEST_FAMILY_ID, "not_a_qualifier"), row_response)
+ self.assertNotIn((TEST_FAMILY_ID, new_qualifier), row_response)
+ self.assertNotIn(("not_a_family_id", "not_a_qualifier"), row_response)
+ self.assertNotIn((None, None), row_response)
+ self.assertNotIn(None, row_response)
+
+ def test_int_indexing(self):
+ # should be able to index into underlying list with an index number directly
+ cell_list = [self._make_cell(value=str(i).encode()) for i in range(10)]
+ cell_list.sort()
+ row_response = self._make_one(TEST_ROW_KEY, cell_list)
+ self.assertEqual(len(row_response), 10)
+ for i in range(10):
+ self.assertEqual(row_response[i].value, str(i).encode())
+ # backwards indexing should work
+ self.assertEqual(row_response[-i - 1].value, str(9 - i).encode())
+ with self.assertRaises(IndexError):
+ row_response[10]
+ with self.assertRaises(IndexError):
+ row_response[-11]
+
+ def test_slice_indexing(self):
+ # should be able to index with a range of indices
+ cell_list = [self._make_cell(value=str(i).encode()) for i in range(10)]
+ cell_list.sort()
+ row_response = self._make_one(TEST_ROW_KEY, cell_list)
+ self.assertEqual(len(row_response), 10)
+ self.assertEqual(len(row_response[0:10]), 10)
+ self.assertEqual(row_response[0:10], cell_list)
+ self.assertEqual(len(row_response[0:]), 10)
+ self.assertEqual(row_response[0:], cell_list)
+ self.assertEqual(len(row_response[:10]), 10)
+ self.assertEqual(row_response[:10], cell_list)
+ self.assertEqual(len(row_response[0:10:1]), 10)
+ self.assertEqual(row_response[0:10:1], cell_list)
+ self.assertEqual(len(row_response[0:10:2]), 5)
+ self.assertEqual(row_response[0:10:2], [cell_list[i] for i in range(0, 10, 2)])
+ self.assertEqual(len(row_response[0:10:3]), 4)
+ self.assertEqual(row_response[0:10:3], [cell_list[i] for i in range(0, 10, 3)])
+ self.assertEqual(len(row_response[10:0:-1]), 9)
+ self.assertEqual(len(row_response[10:0:-2]), 5)
+ self.assertEqual(row_response[10:0:-3], cell_list[10:0:-3])
+ self.assertEqual(len(row_response[0:100]), 10)
+
+ def test_family_indexing(self):
+ # should be able to retrieve cells in a family
+ new_family_id = "new_family_id"
+ cell = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ cell2 = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ cell3 = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ new_family_id,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+
row_response = self._make_one(TEST_ROW_KEY, [cell, cell2, cell3])
+
+ self.assertEqual(len(row_response[TEST_FAMILY_ID]), 2)
+ self.assertEqual(row_response[TEST_FAMILY_ID][0], cell)
+ self.assertEqual(row_response[TEST_FAMILY_ID][1], cell2)
+ self.assertEqual(len(row_response[new_family_id]), 1)
+ self.assertEqual(row_response[new_family_id][0], cell3)
+ with self.assertRaises(ValueError):
+ row_response["not_a_family_id"]
+ with self.assertRaises(TypeError):
+ row_response[None]
+ with self.assertRaises(TypeError):
+ row_response[b"new_family_id"]
+
+ def test_family_qualifier_indexing(self):
+ # should be able to retrieve cells in a family/qualifier tuple
+ new_family_id = "new_family_id"
+ new_qualifier = b"new_qualifier"
+ cell = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ cell2 = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ cell3 = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ new_family_id,
+ new_qualifier,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ row_response = self._make_one(TEST_ROW_KEY, [cell, cell2, cell3])
+
+ self.assertEqual(len(row_response[TEST_FAMILY_ID, TEST_QUALIFIER]), 2)
+ self.assertEqual(row_response[TEST_FAMILY_ID, TEST_QUALIFIER][0], cell)
+ self.assertEqual(row_response[TEST_FAMILY_ID, TEST_QUALIFIER][1], cell2)
+ self.assertEqual(len(row_response[new_family_id, new_qualifier]), 1)
+ self.assertEqual(row_response[new_family_id, new_qualifier][0], cell3)
+ self.assertEqual(len(row_response["new_family_id", "new_qualifier"]), 1)
+ self.assertEqual(len(row_response["new_family_id", b"new_qualifier"]), 1)
+ with self.assertRaises(ValueError):
+ row_response[new_family_id, "not_a_qualifier"]
+ with self.assertRaises(ValueError):
+ row_response["not_a_family_id", new_qualifier]
+ with self.assertRaises(TypeError):
+ row_response[None, None]
+ with self.assertRaises(TypeError):
+ row_response[b"new_family_id", b"new_qualifier"]
+
+ def test_get_column_components(self):
+ # should be able to retrieve (family,qualifier) tuples as keys
+ new_family_id = "new_family_id"
+ new_qualifier = b"new_qualifier"
+ cell = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ cell2 = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ cell3 = self._make_cell(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ new_family_id,
+ new_qualifier,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ row_response = self._make_one(TEST_ROW_KEY, [cell, cell2, cell3])
+
+ self.assertEqual(len(row_response._get_column_components()), 2)
+ self.assertEqual(
+ row_response._get_column_components(),
+ [(TEST_FAMILY_ID, TEST_QUALIFIER), (new_family_id, new_qualifier)],
+ )
+
+ row_response = self._make_one(TEST_ROW_KEY, [])
+ self.assertEqual(len(row_response._get_column_components()), 0)
+ self.assertEqual(row_response._get_column_components(), [])
+
+ row_response = self._make_one(TEST_ROW_KEY, [cell])
+ self.assertEqual(len(row_response._get_column_components()), 1)
+ self.assertEqual(
+ row_response._get_column_components(), [(TEST_FAMILY_ID, TEST_QUALIFIER)]
+ )
+
+
+class TestCell(unittest.TestCase):
+ @staticmethod
+ def _get_target_class():
+ from google.cloud.bigtable.data.row import Cell
+
+ return Cell
+
+ def _make_one(self, *args, **kwargs):
+ if len(args) == 0:
+ args = (
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+
TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + cell = self._make_one( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + self.assertEqual(cell.value, TEST_VALUE) + self.assertEqual(cell.row_key, TEST_ROW_KEY) + self.assertEqual(cell.family, TEST_FAMILY_ID) + self.assertEqual(cell.qualifier, TEST_QUALIFIER) + self.assertEqual(cell.timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(cell.labels, TEST_LABELS) + + def test_to_dict(self): + from google.cloud.bigtable_v2.types import Cell + + cell = self._make_one() + cell_dict = cell._to_dict() + expected_dict = { + "value": TEST_VALUE, + "timestamp_micros": TEST_TIMESTAMP, + "labels": TEST_LABELS, + } + self.assertEqual(len(cell_dict), len(expected_dict)) + for key, value in expected_dict.items(): + self.assertEqual(cell_dict[key], value) + # should be able to construct a Cell proto from the dict + cell_proto = Cell(**cell_dict) + self.assertEqual(cell_proto.value, TEST_VALUE) + self.assertEqual(cell_proto.timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(cell_proto.labels, TEST_LABELS) + + def test_to_dict_no_labels(self): + from google.cloud.bigtable_v2.types import Cell + + cell_no_labels = self._make_one( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + None, + ) + cell_dict = cell_no_labels._to_dict() + expected_dict = { + "value": TEST_VALUE, + "timestamp_micros": TEST_TIMESTAMP, + } + self.assertEqual(len(cell_dict), len(expected_dict)) + for key, value in expected_dict.items(): + self.assertEqual(cell_dict[key], value) + # should be able to construct a Cell proto from the dict + cell_proto = Cell(**cell_dict) + self.assertEqual(cell_proto.value, TEST_VALUE) + self.assertEqual(cell_proto.timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(cell_proto.labels, []) + + def test_int_value(self): + test_int = 1234 + bytes_value = test_int.to_bytes(4, "big", signed=True) + cell = self._make_one( + bytes_value, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + self.assertEqual(int(cell), test_int) + # ensure string formatting works + formatted = "%d" % cell + self.assertEqual(formatted, str(test_int)) + self.assertEqual(int(formatted), test_int) + + def test_int_value_negative(self): + test_int = -99999 + bytes_value = test_int.to_bytes(4, "big", signed=True) + cell = self._make_one( + bytes_value, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + self.assertEqual(int(cell), test_int) + # ensure string formatting works + formatted = "%d" % cell + self.assertEqual(formatted, str(test_int)) + self.assertEqual(int(formatted), test_int) + + def test___str__(self): + test_value = b"helloworld" + cell = self._make_one( + test_value, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + self.assertEqual(str(cell), "b'helloworld'") + self.assertEqual(str(cell), str(test_value)) + + def test___repr__(self): + from google.cloud.bigtable.data.row import Cell # type: ignore # noqa: F401 + + cell = self._make_one() + expected = ( + "Cell(value=b'1234', row_key=b'row', " + + "family='cf1', qualifier=b'col', " + + f"timestamp_micros={TEST_TIMESTAMP}, labels=['label1', 'label2'])" + ) + self.assertEqual(repr(cell), expected) + # should be able to construct instance from __repr__ + result = eval(repr(cell)) + self.assertEqual(result, cell) + + def 
test___repr___no_labels(self):
+ from google.cloud.bigtable.data.row import Cell # type: ignore # noqa: F401
+
+ cell_no_labels = self._make_one(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ None,
+ )
+ expected = (
+ "Cell(value=b'1234', row_key=b'row', "
+ + "family='cf1', qualifier=b'col', "
+ + f"timestamp_micros={TEST_TIMESTAMP}, labels=[])"
+ )
+ self.assertEqual(repr(cell_no_labels), expected)
+ # should be able to construct instance from __repr__
+ result = eval(repr(cell_no_labels))
+ self.assertEqual(result, cell_no_labels)
+
+ def test_equality(self):
+ cell1 = self._make_one()
+ cell2 = self._make_one()
+ self.assertEqual(cell1, cell2)
+ self.assertTrue(cell1 == cell2)
+ args = (
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ for i in range(0, len(args)):
+ # try changing each argument
+ modified_cell = self._make_one(*args[:i], args[i] + args[i], *args[i + 1 :])
+ self.assertNotEqual(cell1, modified_cell)
+ self.assertFalse(cell1 == modified_cell)
+ self.assertTrue(cell1 != modified_cell)
+
+ def test_hash(self):
+ # class should be hashable
+ cell1 = self._make_one()
+ d = {cell1: 1}
+ cell2 = self._make_one()
+ self.assertEqual(d[cell2], 1)
+
+ args = (
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ TEST_FAMILY_ID,
+ TEST_QUALIFIER,
+ TEST_TIMESTAMP,
+ TEST_LABELS,
+ )
+ for i in range(0, len(args)):
+ # try changing each argument
+ modified_cell = self._make_one(*args[:i], args[i] + args[i], *args[i + 1 :])
+ with self.assertRaises(KeyError):
+ d[modified_cell]
+
+ def test_ordering(self):
+ # create cell list in order from lowest to highest priority
+ higher_cells = []
+ i = 0
+ # families; alphabetical order
+ for family in ["z", "y", "x"]:
+ # qualifiers; lowest byte value first
+ for qualifier in [b"z", b"y", b"x"]:
+ # timestamps; newest first
+ for timestamp in [
+ TEST_TIMESTAMP,
+ TEST_TIMESTAMP + 1,
+ TEST_TIMESTAMP + 2,
+ ]:
+ cell = self._make_one(
+ TEST_VALUE,
+ TEST_ROW_KEY,
+ family,
+ qualifier,
+ timestamp,
+ TEST_LABELS,
+ )
+ # cell should be the highest priority encountered so far
+ self.assertEqual(i, len(higher_cells))
+ i += 1
+ for other in higher_cells:
+ self.assertLess(cell, other)
+ higher_cells.append(cell)
+ # final order should be reverse of sorted order
+ expected_order = higher_cells
+ expected_order.reverse()
+ self.assertEqual(expected_order, sorted(higher_cells))
diff --git a/tests/unit/data/test_row_filters.py b/tests/unit/data/test_row_filters.py
new file mode 100644
index 000000000..e90b6f270
--- /dev/null
+++ b/tests/unit/data/test_row_filters.py
@@ -0,0 +1,2039 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
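+
+# Unit tests for google.cloud.bigtable.data.row_filters.
+# Note: the protobuf constructors referenced below (_RowFilterPB,
+# _TimestampRangePB, _ColumnRangePB, _ValueRangePB, _RowFilterChainPB,
+# _RowFilterInterleavePB) and the _get_*_filters() helper factories are
+# module-level helpers defined further down this file.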
+ + +import pytest + + +def test_abstract_class_constructors(): + from google.cloud.bigtable.data.row_filters import RowFilter + from google.cloud.bigtable.data.row_filters import _BoolFilter + from google.cloud.bigtable.data.row_filters import _FilterCombination + from google.cloud.bigtable.data.row_filters import _CellCountFilter + + with pytest.raises(TypeError): + RowFilter() + with pytest.raises(TypeError): + _BoolFilter(False) + with pytest.raises(TypeError): + _FilterCombination([]) + with pytest.raises(TypeError): + _CellCountFilter(0) + + +def test_bool_filter_constructor(): + for FilterType in _get_bool_filters(): + flag = True + row_filter = FilterType(flag) + assert row_filter.flag is flag + + +def test_bool_filter___eq__type_differ(): + for FilterType in _get_bool_filters(): + flag = object() + row_filter1 = FilterType(flag) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_bool_filter___eq__same_value(): + for FilterType in _get_bool_filters(): + flag = object() + row_filter1 = FilterType(flag) + row_filter2 = FilterType(flag) + assert row_filter1 == row_filter2 + + +def test_bool_filter___ne__same_value(): + for FilterType in _get_bool_filters(): + flag = object() + row_filter1 = FilterType(flag) + row_filter2 = FilterType(flag) + assert not (row_filter1 != row_filter2) + + +def test_sink_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import SinkFilter + + flag = True + row_filter = SinkFilter(flag) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(sink=flag) + assert pb_val == expected_pb + + +def test_sink_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import SinkFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + flag = True + row_filter = SinkFilter(flag) + expected_dict = {"sink": flag} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_sink_filter___repr__(): + from google.cloud.bigtable.data.row_filters import SinkFilter + + flag = True + row_filter = SinkFilter(flag) + assert repr(row_filter) == "SinkFilter(flag={})".format(flag) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_pass_all_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import PassAllFilter + + flag = True + row_filter = PassAllFilter(flag) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(pass_all_filter=flag) + assert pb_val == expected_pb + + +def test_pass_all_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import PassAllFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + flag = True + row_filter = PassAllFilter(flag) + expected_dict = {"pass_all_filter": flag} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_pass_all_filter___repr__(): + from google.cloud.bigtable.data.row_filters import PassAllFilter + + flag = True + row_filter = PassAllFilter(flag) + assert repr(row_filter) == "PassAllFilter(flag={})".format(flag) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_block_all_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import BlockAllFilter + + flag = True + row_filter = BlockAllFilter(flag) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(block_all_filter=flag) 
+ assert pb_val == expected_pb + + +def test_block_all_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import BlockAllFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + flag = True + row_filter = BlockAllFilter(flag) + expected_dict = {"block_all_filter": flag} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_block_all_filter___repr__(): + from google.cloud.bigtable.data.row_filters import BlockAllFilter + + flag = True + row_filter = BlockAllFilter(flag) + assert repr(row_filter) == "BlockAllFilter(flag={})".format(flag) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_regex_filterconstructor(): + for FilterType in _get_regex_filters(): + regex = b"abc" + row_filter = FilterType(regex) + assert row_filter.regex == regex + + +def test_regex_filterconstructor_non_bytes(): + for FilterType in _get_regex_filters(): + regex = "abc" + row_filter = FilterType(regex) + assert row_filter.regex == b"abc" + + +def test_regex_filter__eq__type_differ(): + for FilterType in _get_regex_filters(): + regex = b"def-rgx" + row_filter1 = FilterType(regex) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_regex_filter__eq__same_value(): + for FilterType in _get_regex_filters(): + regex = b"trex-regex" + row_filter1 = FilterType(regex) + row_filter2 = FilterType(regex) + assert row_filter1 == row_filter2 + + +def test_regex_filter__ne__same_value(): + for FilterType in _get_regex_filters(): + regex = b"abc" + row_filter1 = FilterType(regex) + row_filter2 = FilterType(regex) + assert not (row_filter1 != row_filter2) + + +def test_row_key_regex_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter + + regex = b"row-key-regex" + row_filter = RowKeyRegexFilter(regex) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(row_key_regex_filter=regex) + assert pb_val == expected_pb + + +def test_row_key_regex_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + regex = b"row-key-regex" + row_filter = RowKeyRegexFilter(regex) + expected_dict = {"row_key_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_key_regex_filter___repr__(): + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter + + regex = b"row-key-regex" + row_filter = RowKeyRegexFilter(regex) + assert repr(row_filter) == "RowKeyRegexFilter(regex={})".format(regex) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_row_sample_filter_constructor(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = object() + row_filter = RowSampleFilter(sample) + assert row_filter.sample is sample + + +def test_row_sample_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_row_sample_filter___eq__same_value(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = 
RowSampleFilter(sample) + assert row_filter1 == row_filter2 + + +def test_row_sample_filter___ne__(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = object() + other_sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = RowSampleFilter(other_sample) + assert row_filter1 != row_filter2 + + +def test_row_sample_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = 0.25 + row_filter = RowSampleFilter(sample) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(row_sample_filter=sample) + assert pb_val == expected_pb + + +def test_row_sample_filter___repr__(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = 0.25 + row_filter = RowSampleFilter(sample) + assert repr(row_filter) == "RowSampleFilter(sample={})".format(sample) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_family_name_regex_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter + + regex = "family-regex" + row_filter = FamilyNameRegexFilter(regex) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(family_name_regex_filter=regex) + assert pb_val == expected_pb + + +def test_family_name_regex_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + regex = "family-regex" + row_filter = FamilyNameRegexFilter(regex) + expected_dict = {"family_name_regex_filter": regex.encode()} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_family_name_regex_filter___repr__(): + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter + + regex = "family-regex" + row_filter = FamilyNameRegexFilter(regex) + expected = "FamilyNameRegexFilter(regex=b'family-regex')" + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_column_qualifier_regex_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter + + regex = b"column-regex" + row_filter = ColumnQualifierRegexFilter(regex) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(column_qualifier_regex_filter=regex) + assert pb_val == expected_pb + + +def test_column_qualifier_regex_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + regex = b"column-regex" + row_filter = ColumnQualifierRegexFilter(regex) + expected_dict = {"column_qualifier_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_column_qualifier_regex_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter + + regex = b"column-regex" + row_filter = ColumnQualifierRegexFilter(regex) + assert repr(row_filter) == "ColumnQualifierRegexFilter(regex={})".format(regex) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_timestamp_range_constructor(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range = 
TimestampRange(start=start, end=end) + assert time_range.start is start + assert time_range.end is end + + +def test_timestamp_range___eq__(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = TimestampRange(start=start, end=end) + assert time_range1 == time_range2 + + +def test_timestamp_range___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = object() + assert not (time_range1 == time_range2) + + +def test_timestamp_range___ne__same_value(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = TimestampRange(start=start, end=end) + assert not (time_range1 != time_range2) + + +def _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=None): + import datetime + from google.cloud._helpers import _EPOCH + from google.cloud.bigtable.data.row_filters import TimestampRange + + if start is not None: + start = _EPOCH + datetime.timedelta(microseconds=start) + if end is not None: + end = _EPOCH + datetime.timedelta(microseconds=end) + time_range = TimestampRange(start=start, end=end) + expected_pb = _TimestampRangePB(**pb_kwargs) + time_pb = time_range._to_pb() + assert time_pb.start_timestamp_micros == expected_pb.start_timestamp_micros + assert time_pb.end_timestamp_micros == expected_pb.end_timestamp_micros + assert time_pb == expected_pb + + +def test_timestamp_range_to_pb(): + start_micros = 30871234 + end_micros = 12939371234 + start_millis = start_micros // 1000 * 1000 + assert start_millis == 30871000 + end_millis = end_micros // 1000 * 1000 + 1000 + assert end_millis == 12939372000 + pb_kwargs = {} + pb_kwargs["start_timestamp_micros"] = start_millis + pb_kwargs["end_timestamp_micros"] = end_millis + _timestamp_range_to_pb_helper(pb_kwargs, start=start_micros, end=end_micros) + + +def test_timestamp_range_to_dict(): + from google.cloud.bigtable.data.row_filters import TimestampRange + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + import datetime + + row_filter = TimestampRange( + start=datetime.datetime(2019, 1, 1), end=datetime.datetime(2019, 1, 2) + ) + expected_dict = { + "start_timestamp_micros": 1546300800000000, + "end_timestamp_micros": 1546387200000000, + } + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.TimestampRange(**expected_dict) == expected_pb_value + + +def test_timestamp_range_to_pb_start_only(): + # Makes sure already milliseconds granularity + start_micros = 30871000 + start_millis = start_micros // 1000 * 1000 + assert start_millis == 30871000 + pb_kwargs = {} + pb_kwargs["start_timestamp_micros"] = start_millis + _timestamp_range_to_pb_helper(pb_kwargs, start=start_micros, end=None) + + +def test_timestamp_range_to_dict_start_only(): + from google.cloud.bigtable.data.row_filters import TimestampRange + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + import datetime + + row_filter = TimestampRange(start=datetime.datetime(2019, 1, 1)) + expected_dict = {"start_timestamp_micros": 1546300800000000} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.TimestampRange(**expected_dict) == expected_pb_value + + +def 
test_timestamp_range_to_pb_end_only():
+ # Makes sure already milliseconds granularity
+ end_micros = 12939371000
+ end_millis = end_micros // 1000 * 1000
+ assert end_millis == 12939371000
+ pb_kwargs = {}
+ pb_kwargs["end_timestamp_micros"] = end_millis
+ _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=end_micros)
+
+
+def test_timestamp_range_to_dict_end_only():
+ from google.cloud.bigtable.data.row_filters import TimestampRange
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
+ import datetime
+
+ row_filter = TimestampRange(end=datetime.datetime(2019, 1, 2))
+ expected_dict = {"end_timestamp_micros": 1546387200000000}
+ assert row_filter._to_dict() == expected_dict
+ expected_pb_value = row_filter._to_pb()
+ assert data_v2_pb2.TimestampRange(**expected_dict) == expected_pb_value
+
+
+def test_timestamp_range___repr__():
+ from google.cloud.bigtable.data.row_filters import TimestampRange
+ import datetime
+
+ start = datetime.datetime(2019, 1, 1)
+ end = datetime.datetime(2019, 1, 2)
+ time_range = TimestampRange(start=start, end=end)
+ assert repr(time_range) == "TimestampRange(start={!r}, end={!r})".format(start, end)
+ assert repr(time_range) == str(time_range)
+ assert eval(repr(time_range)) == time_range
+
+
+def test_timestamp_range_filter___eq__type_differ():
+ from google.cloud.bigtable.data.row_filters import TimestampRangeFilter
+
+ range_ = object()
+ row_filter1 = TimestampRangeFilter(range_)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_timestamp_range_filter___eq__same_value():
+ from google.cloud.bigtable.data.row_filters import TimestampRangeFilter
+
+ range_ = object()
+ row_filter1 = TimestampRangeFilter(range_)
+ row_filter2 = TimestampRangeFilter(range_)
+ assert row_filter1 == row_filter2
+
+
+def test_timestamp_range_filter___ne__():
+ from google.cloud.bigtable.data.row_filters import TimestampRangeFilter
+
+ range_ = object()
+ other_range_ = object()
+ row_filter1 = TimestampRangeFilter(range_)
+ row_filter2 = TimestampRangeFilter(other_range_)
+ assert row_filter1 != row_filter2
+
+
+def test_timestamp_range_filter_to_pb():
+ from google.cloud.bigtable.data.row_filters import TimestampRangeFilter
+
+ row_filter = TimestampRangeFilter()
+ pb_val = row_filter._to_pb()
+ expected_pb = _RowFilterPB(timestamp_range_filter=_TimestampRangePB())
+ assert pb_val == expected_pb
+
+
+def test_timestamp_range_filter_to_dict():
+ from google.cloud.bigtable.data.row_filters import TimestampRangeFilter
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
+ import datetime
+
+ row_filter = TimestampRangeFilter(
+ start=datetime.datetime(2019, 1, 1), end=datetime.datetime(2019, 1, 2)
+ )
+ expected_dict = {
+ "timestamp_range_filter": {
+ "start_timestamp_micros": 1546300800000000,
+ "end_timestamp_micros": 1546387200000000,
+ }
+ }
+ assert row_filter._to_dict() == expected_dict
+ expected_pb_value = row_filter._to_pb()
+ assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value
+
+
+def test_timestamp_range_filter_empty_to_dict():
+ from google.cloud.bigtable.data.row_filters import TimestampRangeFilter
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
+
+ row_filter = TimestampRangeFilter()
+ expected_dict = {"timestamp_range_filter": {}}
+ assert row_filter._to_dict() == expected_dict
+ expected_pb_value = row_filter._to_pb()
+ assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value
+
+
+def test_timestamp_range_filter___repr__():
+ from google.cloud.bigtable.data.row_filters import TimestampRangeFilter
+ import datetime
+
+ start =
datetime.datetime(2019, 1, 1)
+ end = datetime.datetime(2019, 1, 2)
+ row_filter = TimestampRangeFilter(start, end)
+ assert (
+ repr(row_filter)
+ == f"TimestampRangeFilter(start={repr(start)}, end={repr(end)})"
+ )
+ assert repr(row_filter) == str(row_filter)
+ assert eval(repr(row_filter)) == row_filter
+
+
+def test_column_range_filter_constructor_defaults():
+ from google.cloud.bigtable.data.row_filters import ColumnRangeFilter
+
+ family_id = object()
+ row_filter = ColumnRangeFilter(family_id)
+ assert row_filter.family_id is family_id
+ assert row_filter.start_qualifier is None
+ assert row_filter.end_qualifier is None
+ assert row_filter.inclusive_start
+ assert row_filter.inclusive_end
+
+
+def test_column_range_filter_constructor_explicit():
+ from google.cloud.bigtable.data.row_filters import ColumnRangeFilter
+
+ family_id = object()
+ start_qualifier = object()
+ end_qualifier = object()
+ inclusive_start = object()
+ inclusive_end = object()
+ row_filter = ColumnRangeFilter(
+ family_id,
+ start_qualifier=start_qualifier,
+ end_qualifier=end_qualifier,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ assert row_filter.family_id is family_id
+ assert row_filter.start_qualifier is start_qualifier
+ assert row_filter.end_qualifier is end_qualifier
+ assert row_filter.inclusive_start is inclusive_start
+ assert row_filter.inclusive_end is inclusive_end
+
+
+def test_column_range_filter_constructor_bad_start():
+ from google.cloud.bigtable.data.row_filters import ColumnRangeFilter
+
+ family_id = object()
+ with pytest.raises(ValueError):
+ ColumnRangeFilter(family_id, inclusive_start=True)
+
+
+def test_column_range_filter_constructor_bad_end():
+ from google.cloud.bigtable.data.row_filters import ColumnRangeFilter
+
+ family_id = object()
+ with pytest.raises(ValueError):
+ ColumnRangeFilter(family_id, inclusive_end=True)
+
+
+def test_column_range_filter___eq__():
+ from google.cloud.bigtable.data.row_filters import ColumnRangeFilter
+
+ family_id = object()
+ start_qualifier = object()
+ end_qualifier = object()
+ inclusive_start = object()
+ inclusive_end = object()
+ row_filter1 = ColumnRangeFilter(
+ family_id,
+ start_qualifier=start_qualifier,
+ end_qualifier=end_qualifier,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ row_filter2 = ColumnRangeFilter(
+ family_id,
+ start_qualifier=start_qualifier,
+ end_qualifier=end_qualifier,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ assert row_filter1 == row_filter2
+
+
+def test_column_range_filter___eq__type_differ():
+ from google.cloud.bigtable.data.row_filters import ColumnRangeFilter
+
+ family_id = object()
+ row_filter1 = ColumnRangeFilter(family_id)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_column_range_filter___ne__():
+ from google.cloud.bigtable.data.row_filters import ColumnRangeFilter
+
+ family_id = object()
+ other_family_id = object()
+ start_qualifier = object()
+ end_qualifier = object()
+ inclusive_start = object()
+ inclusive_end = object()
+ row_filter1 = ColumnRangeFilter(
+ family_id,
+ start_qualifier=start_qualifier,
+ end_qualifier=end_qualifier,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ row_filter2 = ColumnRangeFilter(
+ other_family_id,
+ start_qualifier=start_qualifier,
+ end_qualifier=end_qualifier,
+ inclusive_start=inclusive_start,
+ inclusive_end=inclusive_end,
+ )
+ assert row_filter1 != row_filter2
+
+
+def test_column_range_filter_to_pb():
+ from
google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + row_filter = ColumnRangeFilter(family_id) + col_range_pb = _ColumnRangePB(family_name=family_id) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + family_id = "column-family-id" + row_filter = ColumnRangeFilter(family_id) + expected_dict = {"column_range_filter": {"family_name": family_id}} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_column_range_filter_to_pb_inclusive_start(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + column = b"column" + row_filter = ColumnRangeFilter(family_id, start_qualifier=column) + col_range_pb = _ColumnRangePB(family_name=family_id, start_qualifier_closed=column) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter_to_pb_exclusive_start(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + column = b"column" + row_filter = ColumnRangeFilter( + family_id, start_qualifier=column, inclusive_start=False + ) + col_range_pb = _ColumnRangePB(family_name=family_id, start_qualifier_open=column) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter_to_pb_inclusive_end(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + column = b"column" + row_filter = ColumnRangeFilter(family_id, end_qualifier=column) + col_range_pb = _ColumnRangePB(family_name=family_id, end_qualifier_closed=column) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter_to_pb_exclusive_end(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + column = b"column" + row_filter = ColumnRangeFilter(family_id, end_qualifier=column, inclusive_end=False) + col_range_pb = _ColumnRangePB(family_name=family_id, end_qualifier_open=column) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + start_qualifier = b"column" + end_qualifier = b"column2" + row_filter = ColumnRangeFilter(family_id, start_qualifier, end_qualifier) + expected = "ColumnRangeFilter(family_id='column-family-id', start_qualifier=b'column', end_qualifier=b'column2', inclusive_start=True, inclusive_end=True)" + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_value_regex_filter_to_pb_w_bytes(): + from google.cloud.bigtable.data.row_filters import ValueRegexFilter + + value = regex = b"value-regex" + row_filter = ValueRegexFilter(value) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb + + +def test_value_regex_filter_to_dict_w_bytes(): + 
from google.cloud.bigtable.data.row_filters import ValueRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + value = regex = b"value-regex" + row_filter = ValueRegexFilter(value) + expected_dict = {"value_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_value_regex_filter_to_pb_w_str(): + from google.cloud.bigtable.data.row_filters import ValueRegexFilter + + value = "value-regex" + regex = value.encode("ascii") + row_filter = ValueRegexFilter(value) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb + + +def test_value_regex_filter_to_dict_w_str(): + from google.cloud.bigtable.data.row_filters import ValueRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + value = "value-regex" + regex = value.encode("ascii") + row_filter = ValueRegexFilter(value) + expected_dict = {"value_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_value_regex_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ValueRegexFilter + + value = "value-regex" + row_filter = ValueRegexFilter(value) + expected = "ValueRegexFilter(regex=b'value-regex')" + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_literal_value_filter_to_pb_w_bytes(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + value = regex = b"value_regex" + row_filter = LiteralValueFilter(value) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb + + +def test_literal_value_filter_to_dict_w_bytes(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + value = regex = b"value_regex" + row_filter = LiteralValueFilter(value) + expected_dict = {"value_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_literal_value_filter_to_pb_w_str(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + value = "value_regex" + regex = value.encode("ascii") + row_filter = LiteralValueFilter(value) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb + + +def test_literal_value_filter_to_dict_w_str(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + value = "value_regex" + regex = value.encode("ascii") + row_filter = LiteralValueFilter(value) + expected_dict = {"value_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +@pytest.mark.parametrize( + "value,expected_byte_string", + [ + # null bytes are encoded as "\x00" in ascii characters + # others are just prefixed with "\" + (0, b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + (1, b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\x01"), + ( + 68, + b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00D", + ), # bytes 
that encode to alphanum are not escaped + (570, b"\\x00\\x00\\x00\\x00\\x00\\x00\\\x02\\\x3a"), + (2852126720, b"\\x00\\x00\\x00\\x00\xaa\\x00\\x00\\x00"), + (-1, b"\xff\xff\xff\xff\xff\xff\xff\xff"), + (-1096642724096, b"\xff\xff\xff\\x00\xaa\xff\xff\\x00"), + ], +) +def test_literal_value_filter_w_int(value, expected_byte_string): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter = LiteralValueFilter(value) + # test pb + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=expected_byte_string) + assert pb_val == expected_pb + # test dict + expected_dict = {"value_regex_filter": expected_byte_string} + assert row_filter._to_dict() == expected_dict + assert data_v2_pb2.RowFilter(**expected_dict) == pb_val + + +def test_literal_value_filter___repr__(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + value = "value_regex" + row_filter = LiteralValueFilter(value) + expected = "LiteralValueFilter(value=b'value_regex')" + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_value_range_filter_constructor_defaults(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + row_filter = ValueRangeFilter() + + assert row_filter.start_value is None + assert row_filter.end_value is None + assert row_filter.inclusive_start + assert row_filter.inclusive_end + + +def test_value_range_filter_constructor_explicit(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + + row_filter = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + + assert row_filter.start_value is start_value + assert row_filter.end_value is end_value + assert row_filter.inclusive_start is inclusive_start + assert row_filter.inclusive_end is inclusive_end + + +def test_value_range_filter_constructor_w_int_values(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + import struct + + start_value = 1 + end_value = 10 + + row_filter = ValueRangeFilter(start_value=start_value, end_value=end_value) + + expected_start_value = struct.Struct(">q").pack(start_value) + expected_end_value = struct.Struct(">q").pack(end_value) + + assert row_filter.start_value == expected_start_value + assert row_filter.end_value == expected_end_value + assert row_filter.inclusive_start + assert row_filter.inclusive_end + + +def test_value_range_filter_constructor_bad_start(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + with pytest.raises(ValueError): + ValueRangeFilter(inclusive_start=True) + + +def test_value_range_filter_constructor_bad_end(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + with pytest.raises(ValueError): + ValueRangeFilter(inclusive_end=True) + + +def test_value_range_filter___eq__(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + 
inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 == row_filter2 + + +def test_value_range_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + row_filter1 = ValueRangeFilter() + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_value_range_filter___ne__(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + start_value = object() + other_start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ValueRangeFilter( + start_value=other_start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 != row_filter2 + + +def test_value_range_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + row_filter = ValueRangeFilter() + expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB()) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter = ValueRangeFilter() + expected_dict = {"value_range_filter": {}} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_value_range_filter_to_pb_inclusive_start(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(start_value=value) + val_range_pb = _ValueRangePB(start_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter_to_pb_exclusive_start(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(start_value=value, inclusive_start=False) + val_range_pb = _ValueRangePB(start_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter_to_pb_inclusive_end(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(end_value=value) + val_range_pb = _ValueRangePB(end_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter_to_pb_exclusive_end(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(end_value=value, inclusive_end=False) + val_range_pb = _ValueRangePB(end_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + start_value = b"some-value" + end_value = b"some-other-value" + row_filter = ValueRangeFilter( + start_value=start_value, end_value=end_value, inclusive_end=False + ) + expected = "ValueRangeFilter(start_value=b'some-value', end_value=b'some-other-value', inclusive_start=True, inclusive_end=False)" + assert repr(row_filter) 
== expected
+ assert repr(row_filter) == str(row_filter)
+ assert eval(repr(row_filter)) == row_filter
+
+
+def test_cell_count_constructor():
+ for FilterType in _get_cell_count_filters():
+ num_cells = object()
+ row_filter = FilterType(num_cells)
+ assert row_filter.num_cells is num_cells
+
+
+def test_cell_count___eq__type_differ():
+ for FilterType in _get_cell_count_filters():
+ num_cells = object()
+ row_filter1 = FilterType(num_cells)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_cell_count___eq__same_value():
+ for FilterType in _get_cell_count_filters():
+ num_cells = object()
+ row_filter1 = FilterType(num_cells)
+ row_filter2 = FilterType(num_cells)
+ assert row_filter1 == row_filter2
+
+
+def test_cell_count___ne__same_value():
+ for FilterType in _get_cell_count_filters():
+ num_cells = object()
+ row_filter1 = FilterType(num_cells)
+ row_filter2 = FilterType(num_cells)
+ assert not (row_filter1 != row_filter2)
+
+
+def test_cells_row_offset_filter_to_pb():
+ from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter
+
+ num_cells = 76
+ row_filter = CellsRowOffsetFilter(num_cells)
+ pb_val = row_filter._to_pb()
+ expected_pb = _RowFilterPB(cells_per_row_offset_filter=num_cells)
+ assert pb_val == expected_pb
+
+
+def test_cells_row_offset_filter_to_dict():
+ from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
+
+ num_cells = 76
+ row_filter = CellsRowOffsetFilter(num_cells)
+ expected_dict = {"cells_per_row_offset_filter": num_cells}
+ assert row_filter._to_dict() == expected_dict
+ expected_pb_value = row_filter._to_pb()
+ assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value
+
+
+def test_cells_row_offset_filter___repr__():
+ from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter
+
+ num_cells = 76
+ row_filter = CellsRowOffsetFilter(num_cells)
+ expected = "CellsRowOffsetFilter(num_cells={})".format(num_cells)
+ assert repr(row_filter) == expected
+ assert repr(row_filter) == str(row_filter)
+ assert eval(repr(row_filter)) == row_filter
+
+
+def test_cells_row_limit_filter_to_pb():
+ from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter
+
+ num_cells = 189
+ row_filter = CellsRowLimitFilter(num_cells)
+ pb_val = row_filter._to_pb()
+ expected_pb = _RowFilterPB(cells_per_row_limit_filter=num_cells)
+ assert pb_val == expected_pb
+
+
+def test_cells_row_limit_filter_to_dict():
+ from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
+
+ num_cells = 189
+ row_filter = CellsRowLimitFilter(num_cells)
+ expected_dict = {"cells_per_row_limit_filter": num_cells}
+ assert row_filter._to_dict() == expected_dict
+ expected_pb_value = row_filter._to_pb()
+ assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value
+
+
+def test_cells_row_limit_filter___repr__():
+ from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter
+
+ num_cells = 189
+ row_filter = CellsRowLimitFilter(num_cells)
+ expected = "CellsRowLimitFilter(num_cells={})".format(num_cells)
+ assert repr(row_filter) == expected
+ assert repr(row_filter) == str(row_filter)
+ assert eval(repr(row_filter)) == row_filter
+
+
+def test_cells_column_limit_filter_to_pb():
+ from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter
+
+ num_cells = 10
+ row_filter = CellsColumnLimitFilter(num_cells)
+ pb_val = row_filter._to_pb()
+ expected_pb =
_RowFilterPB(cells_per_column_limit_filter=num_cells) + assert pb_val == expected_pb + + +def test_cells_column_limit_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + num_cells = 10 + row_filter = CellsColumnLimitFilter(num_cells) + expected_dict = {"cells_per_column_limit_filter": num_cells} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_cells_column_limit_filter___repr__(): + from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter + + num_cells = 10 + row_filter = CellsColumnLimitFilter(num_cells) + expected = "CellsColumnLimitFilter(num_cells={})".format(num_cells) + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_strip_value_transformer_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + flag = True + row_filter = StripValueTransformerFilter(flag) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(strip_value_transformer=flag) + assert pb_val == expected_pb + + +def test_strip_value_transformer_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + flag = True + row_filter = StripValueTransformerFilter(flag) + expected_dict = {"strip_value_transformer": flag} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_strip_value_transformer_filter___repr__(): + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + flag = True + row_filter = StripValueTransformerFilter(flag) + expected = "StripValueTransformerFilter(flag={})".format(flag) + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_apply_label_filter_constructor(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = object() + row_filter = ApplyLabelFilter(label) + assert row_filter.label is label + + +def test_apply_label_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_apply_label_filter___eq__same_value(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = ApplyLabelFilter(label) + assert row_filter1 == row_filter2 + + +def test_apply_label_filter___ne__(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = object() + other_label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = ApplyLabelFilter(other_label) + assert row_filter1 != row_filter2 + + +def test_apply_label_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = "label" + row_filter = ApplyLabelFilter(label) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(apply_label_transformer=label) + assert pb_val == expected_pb + + +def test_apply_label_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import 
ApplyLabelFilter
+ from google.cloud.bigtable_v2.types import data as data_v2_pb2
+
+ label = "label"
+ row_filter = ApplyLabelFilter(label)
+ expected_dict = {"apply_label_transformer": label}
+ assert row_filter._to_dict() == expected_dict
+ expected_pb_value = row_filter._to_pb()
+ assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value
+
+
+def test_apply_label_filter___repr__():
+ from google.cloud.bigtable.data.row_filters import ApplyLabelFilter
+
+ label = "label"
+ row_filter = ApplyLabelFilter(label)
+ expected = "ApplyLabelFilter(label={})".format(label)
+ assert repr(row_filter) == expected
+ assert repr(row_filter) == str(row_filter)
+ assert eval(repr(row_filter)) == row_filter
+
+
+def test_filter_combination_constructor_defaults():
+ for FilterType in _get_filter_combination_filters():
+ row_filter = FilterType()
+ assert row_filter.filters == []
+
+
+def test_filter_combination_constructor_explicit():
+ for FilterType in _get_filter_combination_filters():
+ filters = object()
+ row_filter = FilterType(filters=filters)
+ assert row_filter.filters is filters
+
+
+def test_filter_combination___eq__():
+ for FilterType in _get_filter_combination_filters():
+ filters = object()
+ row_filter1 = FilterType(filters=filters)
+ row_filter2 = FilterType(filters=filters)
+ assert row_filter1 == row_filter2
+
+
+def test_filter_combination___eq__type_differ():
+ for FilterType in _get_filter_combination_filters():
+ filters = object()
+ row_filter1 = FilterType(filters=filters)
+ row_filter2 = object()
+ assert not (row_filter1 == row_filter2)
+
+
+def test_filter_combination___ne__():
+ for FilterType in _get_filter_combination_filters():
+ filters = object()
+ other_filters = object()
+ row_filter1 = FilterType(filters=filters)
+ row_filter2 = FilterType(filters=other_filters)
+ assert row_filter1 != row_filter2
+
+
+def test_filter_combination_len():
+ for FilterType in _get_filter_combination_filters():
+ filters = [object(), object()]
+ row_filter = FilterType(filters=filters)
+ assert len(row_filter) == len(filters)
+
+
+def test_filter_combination_iter():
+ for FilterType in _get_filter_combination_filters():
+ filters = [object(), object()]
+ row_filter = FilterType(filters=filters)
+ assert list(iter(row_filter)) == filters
+ for filter_, expected in zip(row_filter, filters):
+ assert filter_ is expected
+
+
+def test_filter_combination___getitem__():
+ for FilterType in _get_filter_combination_filters():
+ filters = [object(), object()]
+ row_filter = FilterType(filters=filters)
+ assert row_filter[0] is filters[0]
+ assert row_filter[1] is filters[1]
+ with pytest.raises(IndexError):
+ row_filter[2]
+ assert row_filter[:] == filters[:]
+
+
+def test_filter_combination___str__():
+ from google.cloud.bigtable.data.row_filters import PassAllFilter
+
+ for FilterType in _get_filter_combination_filters():
+ filters = [PassAllFilter(True), PassAllFilter(False)]
+ row_filter = FilterType(filters=filters)
+ expected = (
+ "([\n PassAllFilter(flag=True),\n PassAllFilter(flag=False),\n])"
+ )
+ assert expected in str(row_filter)
+
+
+def test_row_filter_chain_to_pb():
+ from google.cloud.bigtable.data.row_filters import RowFilterChain
+ from google.cloud.bigtable.data.row_filters import RowSampleFilter
+ from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter
+
+ row_filter1 = StripValueTransformerFilter(True)
+ row_filter1_pb = row_filter1._to_pb()
+
+ row_filter2 = RowSampleFilter(0.25)
+ row_filter2_pb = row_filter2._to_pb()
+
+ row_filter3 =
RowFilterChain(filters=[row_filter1, row_filter2]) + filter_pb = row_filter3._to_pb() + + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB(filters=[row_filter1_pb, row_filter2_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_chain_to_dict(): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + filter_dict = row_filter3._to_dict() + + expected_dict = {"chain": {"filters": [row_filter1_dict, row_filter2_dict]}} + assert filter_dict == expected_dict + expected_pb_value = row_filter3._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_filter_chain_to_pb_nested(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + row_filter3_pb = row_filter3._to_pb() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_pb = row_filter4._to_pb() + + row_filter5 = RowFilterChain(filters=[row_filter3, row_filter4]) + filter_pb = row_filter5._to_pb() + + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB(filters=[row_filter3_pb, row_filter4_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_chain_to_dict_nested(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + row_filter3_dict = row_filter3._to_dict() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_dict = row_filter4._to_dict() + + row_filter5 = RowFilterChain(filters=[row_filter3, row_filter4]) + filter_dict = row_filter5._to_dict() + + expected_dict = {"chain": {"filters": [row_filter3_dict, row_filter4_dict]}} + assert filter_dict == expected_dict + expected_pb_value = row_filter5._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_filter_chain___repr__(): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + expected = f"RowFilterChain(filters={[row_filter1, row_filter2]})" + assert repr(row_filter3) == expected + assert eval(repr(row_filter3)) == row_filter3 + + +def 
test_row_filter_chain___str__(): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + expected = "RowFilterChain([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n])" + assert str(row_filter3) == expected + # test nested + row_filter4 = RowFilterChain(filters=[row_filter3]) + expected = "RowFilterChain([\n RowFilterChain([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n ]),\n])" + assert str(row_filter4) == expected + + +def test_row_filter_union_to_pb(): + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + filter_pb = row_filter3._to_pb() + + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB(filters=[row_filter1_pb, row_filter2_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_union_to_dict(): + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + filter_dict = row_filter3._to_dict() + + expected_dict = {"interleave": {"filters": [row_filter1_dict, row_filter2_dict]}} + assert filter_dict == expected_dict + expected_pb_value = row_filter3._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_filter_union_to_pb_nested(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + row_filter3_pb = row_filter3._to_pb() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_pb = row_filter4._to_pb() + + row_filter5 = RowFilterUnion(filters=[row_filter3, row_filter4]) + filter_pb = row_filter5._to_pb() + + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB(filters=[row_filter3_pb, row_filter4_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_union_to_dict_nested(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from 
google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + row_filter3_dict = row_filter3._to_dict() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_dict = row_filter4._to_dict() + + row_filter5 = RowFilterUnion(filters=[row_filter3, row_filter4]) + filter_dict = row_filter5._to_dict() + + expected_dict = {"interleave": {"filters": [row_filter3_dict, row_filter4_dict]}} + assert filter_dict == expected_dict + expected_pb_value = row_filter5._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_filter_union___repr__(): + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + expected = "RowFilterUnion(filters=[StripValueTransformerFilter(flag=True), RowSampleFilter(sample=0.25)])" + assert repr(row_filter3) == expected + assert eval(repr(row_filter3)) == row_filter3 + + +def test_row_filter_union___str__(): + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + expected = "RowFilterUnion([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n])" + assert str(row_filter3) == expected + # test nested + row_filter4 = RowFilterUnion(filters=[row_filter3]) + expected = "RowFilterUnion([\n RowFilterUnion([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n ]),\n])" + assert str(row_filter4) == expected + + +def test_conditional_row_filter_constructor(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + + predicate_filter = object() + true_filter = object() + false_filter = object() + cond_filter = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter.predicate_filter is predicate_filter + assert cond_filter.true_filter is true_filter + assert cond_filter.false_filter is false_filter + + +def test_conditional_row_filter___eq__(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + + predicate_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter1 == cond_filter2 + + +def test_conditional_row_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + + predicate_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = object() + assert not (cond_filter1 == cond_filter2) + + +def test_conditional_row_filter___ne__(): + from 
google.cloud.bigtable.data.row_filters import ConditionalRowFilter + + predicate_filter = object() + other_predicate_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = ConditionalRowFilter( + other_predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter1 != cond_filter2 + + +def test_conditional_row_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = CellsRowOffsetFilter(11) + row_filter3_pb = row_filter3._to_pb() + + row_filter4 = ConditionalRowFilter( + row_filter1, true_filter=row_filter2, false_filter=row_filter3 + ) + filter_pb = row_filter4._to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, + true_filter=row_filter2_pb, + false_filter=row_filter3_pb, + ) + ) + assert filter_pb == expected_pb + + +def test_conditional_row_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = CellsRowOffsetFilter(11) + row_filter3_dict = row_filter3._to_dict() + + row_filter4 = ConditionalRowFilter( + row_filter1, true_filter=row_filter2, false_filter=row_filter3 + ) + filter_dict = row_filter4._to_dict() + + expected_dict = { + "condition": { + "predicate_filter": row_filter1_dict, + "true_filter": row_filter2_dict, + "false_filter": row_filter3_dict, + } + } + assert filter_dict == expected_dict + expected_pb_value = row_filter4._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_conditional_row_filter_to_pb_true_only(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + filter_pb = row_filter3._to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, true_filter=row_filter2_pb + ) + ) + assert filter_pb == expected_pb + + +def test_conditional_row_filter_to_dict_true_only(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import 
StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + filter_dict = row_filter3._to_dict() + + expected_dict = { + "condition": { + "predicate_filter": row_filter1_dict, + "true_filter": row_filter2_dict, + } + } + assert filter_dict == expected_dict + expected_pb_value = row_filter3._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_conditional_row_filter_to_pb_false_only(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = ConditionalRowFilter(row_filter1, false_filter=row_filter2) + filter_pb = row_filter3._to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, false_filter=row_filter2_pb + ) + ) + assert filter_pb == expected_pb + + +def test_conditional_row_filter_to_dict_false_only(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = ConditionalRowFilter(row_filter1, false_filter=row_filter2) + filter_dict = row_filter3._to_dict() + + expected_dict = { + "condition": { + "predicate_filter": row_filter1_dict, + "false_filter": row_filter2_dict, + } + } + assert filter_dict == expected_dict + expected_pb_value = row_filter3._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_conditional_row_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + expected = ( + "ConditionalRowFilter(predicate_filter=StripValueTransformerFilter(" + "flag=True), true_filter=RowSampleFilter(sample=0.25), false_filter=None)" + ) + assert repr(row_filter3) == expected + assert eval(repr(row_filter3)) == row_filter3 + # test nested + row_filter4 = ConditionalRowFilter(row_filter3, true_filter=row_filter2) + expected = "ConditionalRowFilter(predicate_filter=ConditionalRowFilter(predicate_filter=StripValueTransformerFilter(flag=True), true_filter=RowSampleFilter(sample=0.25), false_filter=None), true_filter=RowSampleFilter(sample=0.25), false_filter=None)" + assert repr(row_filter4) == expected + assert eval(repr(row_filter4)) == row_filter4 + + +def test_conditional_row_filter___str__(): + from google.cloud.bigtable.data.row_filters import 
ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + expected = "ConditionalRowFilter(\n predicate_filter=StripValueTransformerFilter(flag=True),\n true_filter=RowSampleFilter(sample=0.25),\n)" + assert str(row_filter3) == expected + # test nested + row_filter4 = ConditionalRowFilter( + row_filter3, + true_filter=row_filter2, + false_filter=RowFilterUnion([row_filter1, row_filter2]), + ) + expected = "ConditionalRowFilter(\n predicate_filter=ConditionalRowFilter(\n predicate_filter=StripValueTransformerFilter(flag=True),\n true_filter=RowSampleFilter(sample=0.25),\n ),\n true_filter=RowSampleFilter(sample=0.25),\n false_filter=RowFilterUnion([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n ]),\n)" + assert str(row_filter4) == expected + + +@pytest.mark.parametrize( + "input_arg, expected_bytes", + [ + (b"abc", b"abc"), + ("abc", b"abc"), + (1, b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\x01"), # null bytes are ascii + (b"*", b"\\*"), + (".", b"\\."), + (b"\\", b"\\\\"), + (b"h.*i", b"h\\.\\*i"), + (b'""', b'\\"\\"'), + (b"[xyz]", b"\\[xyz\\]"), + (b"\xe2\x98\xba\xef\xb8\x8f", b"\xe2\x98\xba\xef\xb8\x8f"), + ("☃", b"\xe2\x98\x83"), + (r"\C☃", b"\\\\C\xe2\x98\x83"), + ], +) +def test_literal_value__write_literal_regex(input_arg, expected_bytes): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + filter_ = LiteralValueFilter(input_arg) + assert filter_.regex == expected_bytes + + +def _ColumnRangePB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.ColumnRange(*args, **kw) + + +def _RowFilterPB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.RowFilter(*args, **kw) + + +def _RowFilterChainPB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.RowFilter.Chain(*args, **kw) + + +def _RowFilterConditionPB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.RowFilter.Condition(*args, **kw) + + +def _RowFilterInterleavePB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.RowFilter.Interleave(*args, **kw) + + +def _TimestampRangePB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.TimestampRange(*args, **kw) + + +def _ValueRangePB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.ValueRange(*args, **kw) + + +def _get_regex_filters(): + from google.cloud.bigtable.data.row_filters import ( + RowKeyRegexFilter, + FamilyNameRegexFilter, + ColumnQualifierRegexFilter, + ValueRegexFilter, + LiteralValueFilter, + ) + + return [ + RowKeyRegexFilter, + FamilyNameRegexFilter, + ColumnQualifierRegexFilter, + ValueRegexFilter, + LiteralValueFilter, + ] + + +def _get_bool_filters(): + from google.cloud.bigtable.data.row_filters import ( + SinkFilter, + PassAllFilter, + BlockAllFilter, + StripValueTransformerFilter, + ) + + return [ + SinkFilter, + PassAllFilter, + BlockAllFilter, + StripValueTransformerFilter, + ] + + +def _get_cell_count_filters(): + from 
google.cloud.bigtable.data.row_filters import (
+        CellsRowLimitFilter,
+        CellsRowOffsetFilter,
+        CellsColumnLimitFilter,
+    )
+
+    return [
+        CellsRowLimitFilter,
+        CellsRowOffsetFilter,
+        CellsColumnLimitFilter,
+    ]
+
+
+def _get_filter_combination_filters():
+    from google.cloud.bigtable.data.row_filters import (
+        RowFilterChain,
+        RowFilterUnion,
+    )
+
+    return [
+        RowFilterChain,
+        RowFilterUnion,
+    ]
diff --git a/tests/unit/data/test_sync_up_to_date.py b/tests/unit/data/test_sync_up_to_date.py
new file mode 100644
index 000000000..e6bce9cf6
--- /dev/null
+++ b/tests/unit/data/test_sync_up_to_date.py
@@ -0,0 +1,99 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+import hashlib
+import pytest
+import ast
+import re
+from difflib import unified_diff
+
+if sys.version_info < (3, 9):
+    pytest.skip("ast.unparse is only available in 3.9+", allow_module_level=True)
+
+# add cross_sync to path
+test_dir_name = os.path.dirname(__file__)
+repo_root = os.path.join(test_dir_name, "..", "..", "..")
+cross_sync_path = os.path.join(repo_root, ".cross_sync")
+sys.path.append(cross_sync_path)
+
+from generate import convert_files_in_dir, CrossSyncOutputFile  # noqa: E402
+
+sync_files = list(convert_files_in_dir(repo_root))
+
+
+def test_found_files():
+    """
+    Make sure sync_files is populated with some of the files we expect to see,
+    to ensure that later tests are actually running.
+    """
+    assert len(sync_files) > 0, "No sync files found"
+    assert len(sync_files) > 10, "Unexpectedly few sync files found"
+    # test for key files
+    outputs = [os.path.basename(f.output_path) for f in sync_files]
+    assert "client.py" in outputs
+    assert "execute_query_iterator.py" in outputs
+    assert "test_client.py" in outputs
+    assert "test_system_autogen.py" in outputs, "system tests not found"
+    assert (
+        "client_handler_data_sync_autogen.py" in outputs
+    ), "test proxy handler not found"
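To make the comparison below concrete, here is a stdlib-only sketch (not part of the diff) of the same double check: an empty unified diff and matching md5 digests are two signals of the same freshness condition.

```python
import difflib
import hashlib


def renders_match(on_disk: str, regenerated: str) -> bool:
    # Toy version of the check below: an empty unified diff means the files
    # agree line-for-line; the md5 comparison is a byte-exact cross-check.
    diff_lines = list(
        difflib.unified_diff(
            on_disk.splitlines(), regenerated.splitlines(), lineterm=""
        )
    )
    same_digest = (
        hashlib.md5(on_disk.encode()).hexdigest()
        == hashlib.md5(regenerated.encode()).hexdigest()
    )
    return not diff_lines and same_digest
```

+
+
+@pytest.mark.parametrize("sync_file", sync_files, ids=lambda f: f.output_path)
+def test_sync_up_to_date(sync_file):
+    """
+    Generate a fresh copy of each cross_sync file, and compare hashes with the existing file.
+
+    If this test fails, run `nox -s generate_sync` to update the sync files.
+    """
+    path = sync_file.output_path
+    new_render = sync_file.render(with_formatter=True, save_to_disk=False)
+    found_render = CrossSyncOutputFile(
+        output_path="", ast_tree=ast.parse(open(path).read()), header=sync_file.header
+    ).render(with_formatter=True, save_to_disk=False)
+    # compare by content
+    diff = unified_diff(found_render.splitlines(), new_render.splitlines(), lineterm="")
+    diff_str = "\n".join(diff)
+    assert (
+        not diff_str
+    ), f"Found differences. 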
Run `nox -s generate_sync` to update:\n{diff_str}" + # compare by hash + new_hash = hashlib.md5(new_render.encode()).hexdigest() + found_hash = hashlib.md5(found_render.encode()).hexdigest() + assert new_hash == found_hash, f"md5 mismatch for {path}" + + +@pytest.mark.parametrize("sync_file", sync_files, ids=lambda f: f.output_path) +def test_verify_headers(sync_file): + license_regex = r""" + \#\ Copyright\ \d{4}\ Google\ LLC\n + \#\n + \#\ Licensed\ under\ the\ Apache\ License,\ Version\ 2\.0\ \(the\ \"License\"\);\n + \#\ you\ may\ not\ use\ this\ file\ except\ in\ compliance\ with\ the\ License\.\n + \#\ You\ may\ obtain\ a\ copy\ of\ the\ License\ at\ + \#\n + \#\s+http:\/\/www\.apache\.org\/licenses\/LICENSE-2\.0\n + \#\n + \#\ Unless\ required\ by\ applicable\ law\ or\ agreed\ to\ in\ writing,\ software\n + \#\ distributed\ under\ the\ License\ is\ distributed\ on\ an\ \"AS\ IS\"\ BASIS,\n + \#\ WITHOUT\ WARRANTIES\ OR\ CONDITIONS\ OF\ ANY\ KIND,\ either\ express\ or\ implied\.\n + \#\ See\ the\ License\ for\ the\ specific\ language\ governing\ permissions\ and\n + \#\ limitations\ under\ the\ License + """ + pattern = re.compile(license_regex, re.VERBOSE) + + with open(sync_file.output_path, "r") as f: + content = f.read() + assert pattern.search(content), "Missing license header" diff --git a/tests/unit/gapic/__init__.py b/tests/unit/gapic/__init__.py index 89a37dc92..cbf94b283 100644 --- a/tests/unit/gapic/__init__.py +++ b/tests/unit/gapic/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/bigtable_admin_v2/__init__.py b/tests/unit/gapic/bigtable_admin_v2/__init__.py index 89a37dc92..cbf94b283 100644 --- a/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ b/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index b2caa98ba..b0ba35f0c 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
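One aside on the `re.VERBOSE` pattern in `test_verify_headers` above: verbose mode ignores unescaped whitespace inside a pattern (and treats an unescaped `#` as starting a comment), which is why every literal space and hash in the license text is escaped. A quick self-contained illustration:

```python
import re

# Escaped spaces ("\ ") match real spaces even under re.VERBOSE.
strict = re.compile(r"\#\ Copyright\ \d{4}\ Google\ LLC", re.VERBOSE)
assert strict.search("# Copyright 2024 Google LLC")

# With unescaped spaces, VERBOSE mode drops them from the pattern,
# so the same input no longer matches.
loose = re.compile(r"\#\ Copyright \d{4} Google LLC", re.VERBOSE)
assert not loose.search("# Copyright 2024 Google LLC")
```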
@@ -24,11 +24,12 @@
 import grpc
 from grpc.experimental import aio
-from collections.abc import Iterable
+from collections.abc import Iterable, AsyncIterable
 from google.protobuf import json_format
 import json
 import math
 import pytest
+from google.api_core import api_core_version
 from proto.marshal.rules.dates import DurationRule, TimestampRule
 from proto.marshal.rules import wrappers
 from requests import Response
@@ -36,6 +37,13 @@
 from requests.sessions import Session
 from google.protobuf import json_format
 
+try:
+    from google.auth.aio import credentials as ga_credentials_async
+
+    HAS_GOOGLE_AUTH_AIO = True
+except ImportError:  # pragma: NO COVER
+    HAS_GOOGLE_AUTH_AIO = False
+
 from google.api_core import client_options
 from google.api_core import exceptions as core_exceptions
 from google.api_core import future
@@ -46,6 +54,7 @@
 from google.api_core import operation_async  # type: ignore
 from google.api_core import operations_v1
 from google.api_core import path_template
+from google.api_core import retry as retries
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
 from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
@@ -71,10 +80,32 @@
 import google.auth
 
+CRED_INFO_JSON = {
+    "credential_source": "/path/to/file",
+    "credential_type": "service account credentials",
+    "principal": "service-account@example.com",
+}
+CRED_INFO_STRING = json.dumps(CRED_INFO_JSON)
+
+
+async def mock_async_gen(data, chunk_size=1):
+    for i in range(0, len(data), chunk_size):  # pragma: NO COVER
+        chunk = data[i : i + chunk_size]
+        yield chunk.encode("utf-8")
+
+
 def client_cert_source_callback():
     return b"cert bytes", b"key bytes"
 
 
+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+    if HAS_GOOGLE_AUTH_AIO:
+        return ga_credentials_async.AnonymousCredentials()
+    return ga_credentials.AnonymousCredentials()
+
+
 # If default endpoint is localhost, then default mtls endpoint will be the same.
 # This method modifies the default endpoint so the client can produce a different
 # mtls endpoint for endpoint testing purposes.
@@ -86,6 +117,17 @@ def modify_default_endpoint(client):
     )
 
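For orientation before the template-aware helper below: the newer clients carry an endpoint *template* rather than a fixed endpoint, and the effective host comes from a plain `str.format` substitution. A small sketch follows; the template literal here is an assumption for illustration only, since the tests always read it off the client class.

```python
# Assumed template value, for illustration only.
_DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}"

assert (
    _DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN="googleapis.com")
    == "bigtableadmin.googleapis.com"
)
assert (
    _DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN="my-universe.example")
    == "bigtableadmin.my-universe.example"
)
```

+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+# mtls endpoint for endpoint testing purposes.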
+def modify_default_endpoint_template(client):
+    return (
+        "test.{UNIVERSE_DOMAIN}"
+        if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE)
+        else client._DEFAULT_ENDPOINT_TEMPLATE
+    )
+
+
 def test__get_default_mtls_endpoint():
     api_endpoint = "example.googleapis.com"
     api_mtls_endpoint = "example.mtls.googleapis.com"
@@ -116,6 +158,359 @@ def test__get_default_mtls_endpoint():
     )
 
+def test__read_environment_variables():
+    assert BigtableInstanceAdminClient._read_environment_variables() == (
+        False,
+        "auto",
+        None,
+    )
+
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+        assert BigtableInstanceAdminClient._read_environment_variables() == (
+            True,
+            "auto",
+            None,
+        )
+
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
+        assert BigtableInstanceAdminClient._read_environment_variables() == (
+            False,
+            "auto",
+            None,
+        )
+
+    with mock.patch.dict(
+        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+    ):
+        if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+            with pytest.raises(ValueError) as excinfo:
+                BigtableInstanceAdminClient._read_environment_variables()
+            assert (
+                str(excinfo.value)
+                == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+            )
+        else:
+            assert BigtableInstanceAdminClient._read_environment_variables() == (
+                False,
+                "auto",
+                None,
+            )
+
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+        assert BigtableInstanceAdminClient._read_environment_variables() == (
+            False,
+            "never",
+            None,
+        )
+
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+        assert BigtableInstanceAdminClient._read_environment_variables() == (
+            False,
+            "always",
+            None,
+        )
+
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
+        assert BigtableInstanceAdminClient._read_environment_variables() == (
+            False,
+            "auto",
+            None,
+        )
+
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+        with pytest.raises(MutualTLSChannelError) as excinfo:
+            BigtableInstanceAdminClient._read_environment_variables()
+        assert (
+            str(excinfo.value)
+            == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+        )
+
+    with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
+        assert BigtableInstanceAdminClient._read_environment_variables() == (
+            False,
+            "auto",
+            "foo.com",
+        )
+
+
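Read together, the assertions above pin down a simple precedence. The sketch below is a hypothetical condensation of that legacy behavior, not the generated implementation (the real logic is the client's `_read_environment_variables()` classmethod, and the `should_use_client_cert` branch exercised next changes how invalid values are treated):

```python
import os

from google.auth.exceptions import MutualTLSChannelError


def read_environment_variables():
    # Hypothetical condensation of the legacy precedence asserted above.
    use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false").lower()
    if use_client_cert not in ("true", "false"):
        raise ValueError(
            "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
            " either `true` or `false`"
        )
    use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if use_mtls_endpoint not in ("never", "auto", "always"):
        raise MutualTLSChannelError(
            "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be"
            " `never`, `auto` or `always`"
        )
    return (
        use_client_cert == "true",
        use_mtls_endpoint,
        os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN"),
    )
```

+def test_use_client_cert_effective():
+    # Test case 1: Test when `should_use_client_cert` returns True.
+    # We mock the `should_use_client_cert` function to simulate a scenario where
+    # the google-auth library supports automatic mTLS and determines that a
+    # client certificate should be used.
+    if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch(
+            "google.auth.transport.mtls.should_use_client_cert", return_value=True
+        ):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+    # Test case 2: Test when `should_use_client_cert` returns False.
+    # We mock the `should_use_client_cert` function to simulate a scenario where
+    # the google-auth library supports automatic mTLS and determines that a
+    # client certificate should NOT be used.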
+    if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch(
+            "google.auth.transport.mtls.should_use_client_cert", return_value=False
+        ):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+    # Test case 3: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+    # Test case 4: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}
+        ):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+    # Test case 5: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+    # Test case 6: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"}
+        ):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+    # Test case 7: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is True
+
+    # Test case 8: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+        ):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
+    # Test case 9: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+    # In this case, the method should return False, which is the default value.
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(os.environ, clear=True):
+            assert BigtableInstanceAdminClient._use_client_cert_effective() is False
+
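Cases 3 through 9 amount to a case-insensitive boolean parse with a `False` default on the legacy path; a hypothetical condensation:

```python
def parse_use_client_cert(raw_value: str) -> bool:
    # Hypothetical equivalent of the fallback behavior asserted above; the
    # caller would supply "false" when the variable is unset (case 9).
    value = raw_value.lower()  # accepts "true"/"True"/"TRUE", etc.
    if value not in ("true", "false"):
        raise ValueError('must be either "true" or "false"')
    return value == "true"
```

+    # Test case 10: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+    # The method should raise a ValueError as the environment variable must be either
+    # "true" or "false".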
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"} + ): + with pytest.raises(ValueError): + BigtableInstanceAdminClient._use_client_cert_effective() + + # Test case 11: Test when `should_use_client_cert` is available and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value. + # The method should return False as the environment variable is set to an invalid value. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"} + ): + assert BigtableInstanceAdminClient._use_client_cert_effective() is False + + # Test case 12: Test when `should_use_client_cert` is available and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is unset. Also, + # the GOOGLE_API_CONFIG environment variable is unset. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}): + with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}): + assert BigtableInstanceAdminClient._use_client_cert_effective() is False + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BigtableInstanceAdminClient._get_client_cert_source(None, False) is None + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BigtableInstanceAdminClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BigtableInstanceAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == 
BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, mock_universe, "never" + ) + == mock_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BigtableInstanceAdminClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + BigtableInstanceAdminClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BigtableInstanceAdminClient._get_universe_domain(None, None) + == BigtableInstanceAdminClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BigtableInstanceAdminClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "error_code,cred_info_json,show_cred_info", + [ + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), + ], +) +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = BigtableInstanceAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] + + +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = BigtableInstanceAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + assert error.details == [] + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -239,13 +634,13 @@ def test_bigtable_instance_admin_client_get_transport_class(): ) @mock.patch.object( BigtableInstanceAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), ) @mock.patch.object( BigtableInstanceAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), ) def test_bigtable_instance_admin_client_client_options( client_class, transport_class, transport_name @@ -287,7 +682,9 @@ def 
test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -317,15 +714,12 @@ def test_bigtable_instance_admin_client_client_options( # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class(transport=transport_name) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -335,7 +729,9 @@ def test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -353,7 +749,9 @@ def test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -406,13 +804,13 @@ def test_bigtable_instance_admin_client_client_options( ) @mock.patch.object( BigtableInstanceAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), ) @mock.patch.object( BigtableInstanceAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_bigtable_instance_admin_client_mtls_env_auto( @@ -435,7 +833,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -467,7 +867,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -501,7 +903,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( patched.assert_called_once_with( credentials=None, 
credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -552,6 +956,119 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client assert api_endpoint == mock_api_endpoint assert cert_source is None + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported". + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset. + test_cases = [ + ( + # With workloads present in config, mTLS is enabled. + { + "version": 1, + "cert_configs": { + "workload": { + "cert_path": "path/to/cert/file", + "key_path": "path/to/key/file", + } + }, + }, + mock_client_cert_source, + ), + ( + # With workloads not present in config, mTLS is disabled. + { + "version": 1, + "cert_configs": {}, + }, + None, + ), + ] + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + for config_data, expected_cert_source in test_cases: + env = os.environ.copy() + env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None) + with mock.patch.dict(os.environ, env, clear=True): + config_filename = "mock_certificate_config.json" + config_file_content = json.dumps(config_data) + m = mock.mock_open(read_data=config_file_content) + with mock.patch("builtins.open", m): + with mock.patch.dict( + os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename} + ): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is expected_cert_source + + # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset(empty). + test_cases = [ + ( + # With workloads present in config, mTLS is enabled. + { + "version": 1, + "cert_configs": { + "workload": { + "cert_path": "path/to/cert/file", + "key_path": "path/to/key/file", + } + }, + }, + mock_client_cert_source, + ), + ( + # With workloads not present in config, mTLS is disabled. 
+ { + "version": 1, + "cert_configs": {}, + }, + None, + ), + ] + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + for config_data, expected_cert_source in test_cases: + env = os.environ.copy() + env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") + with mock.patch.dict(os.environ, env, clear=True): + config_filename = "mock_certificate_config.json" + config_file_content = json.dumps(config_data) + m = mock.mock_open(read_data=config_file_content) + with mock.patch("builtins.open", m): + with mock.patch.dict( + os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename} + ): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is expected_cert_source + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() @@ -591,6 +1108,103 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + +@pytest.mark.parametrize( + "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient] +) +@mock.patch.object( + BigtableInstanceAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), +) +def test_bigtable_instance_admin_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -625,7 +1239,9 @@ def test_bigtable_instance_admin_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -670,7 +1286,9 @@ def test_bigtable_instance_admin_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -730,7 +1348,9 @@ def test_bigtable_instance_admin_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -802,54 +1422,157 @@ def test_create_instance(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + request = bigtable_instance_admin.CreateInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_create_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_create_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - client.create_instance() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_instance(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + assert args[0] == bigtable_instance_admin.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + ) -@pytest.mark.asyncio -async def test_create_instance_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.CreateInstanceRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +def test_create_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Ensure method has been cached + assert client._transport.create_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - response = await client.create_instance(request) + client._transport._wrapped_methods[client._transport.create_instance] = mock_rpc + request = {} + client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_instance + ] = mock_rpc + + request = {} + await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + request = bigtable_instance_admin.CreateInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
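    # (create_instance is a long-running operation, so the client surfaces the
    # mocked operations_pb2.Operation as a future.)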
assert isinstance(response, future.Future) @@ -892,7 +1615,7 @@ def test_create_instance_field_headers(): @pytest.mark.asyncio async def test_create_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -977,7 +1700,7 @@ def test_create_instance_flattened_error(): @pytest.mark.asyncio async def test_create_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1018,7 +1741,7 @@ async def test_create_instance_flattened_async(): @pytest.mark.asyncio async def test_create_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1059,13 +1782,15 @@ def test_get_instance(request_type, transport: str = "grpc"): state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() + request = bigtable_instance_admin.GetInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) @@ -1074,22 +1799,112 @@ def test_get_instance(request_type, transport: str = "grpc"): assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True + assert response.satisfies_pzi is True -def test_get_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_get_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetInstanceRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - client.get_instance() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_instance(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() + assert args[0] == bigtable_instance_admin.GetInstanceRequest( + name="name_value", + ) + + +def test_get_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc + request = {} + client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_instance + ] = mock_rpc + + request = {} + await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1098,7 +1913,7 @@ async def test_get_instance_async( request_type=bigtable_instance_admin.GetInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1116,6 +1931,7 @@ async def test_get_instance_async( state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_instance(request) @@ -1123,7 +1939,8 @@ async def test_get_instance_async( # Establish that the underlying gRPC stub method was called. 
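    # (the async variant only asserts a truthy length, i.e. that the stub was
    # invoked at least once.)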
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() + request = bigtable_instance_admin.GetInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) @@ -1132,6 +1949,7 @@ async def test_get_instance_async( assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -1171,7 +1989,7 @@ def test_get_instance_field_headers(): @pytest.mark.asyncio async def test_get_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1239,7 +2057,7 @@ def test_get_instance_flattened_error(): @pytest.mark.asyncio async def test_get_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1266,7 +2084,7 @@ async def test_get_instance_flattened_async(): @pytest.mark.asyncio async def test_get_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1307,7 +2125,8 @@ def test_list_instances(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() + request = bigtable_instance_admin.ListInstancesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response.raw_page is response @@ -1316,20 +2135,111 @@ def test_list_instances(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_instances_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_list_instances_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListInstancesRequest( + parent="parent_value", + page_token="page_token_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - client.list_instances() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_instances(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() + assert args[0] == bigtable_instance_admin.ListInstancesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_instances_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_instances in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_instances] = mock_rpc + request = {} + client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_instances(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_instances_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_instances + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_instances + ] = mock_rpc + + request = {} + await client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_instances(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1338,7 +2248,7 @@ async def test_list_instances_async( request_type=bigtable_instance_admin.ListInstancesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1360,7 +2270,8 @@ async def test_list_instances_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() + request = bigtable_instance_admin.ListInstancesRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
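    # (ListInstancesResponse carries its own next_page_token; the sync variant
    # above also checks that raw_page is the response itself.)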
assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) @@ -1405,7 +2316,7 @@ def test_list_instances_field_headers(): @pytest.mark.asyncio async def test_list_instances_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1475,7 +2386,7 @@ def test_list_instances_flattened_error(): @pytest.mark.asyncio async def test_list_instances_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1504,7 +2415,7 @@ async def test_list_instances_flattened_async(): @pytest.mark.asyncio async def test_list_instances_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1542,13 +2453,15 @@ def test_update_instance(request_type, transport: str = "grpc"): state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) response = client.update_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() + request = instance.Instance() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) @@ -1557,22 +2470,114 @@ def test_update_instance(request_type, transport: str = "grpc"): assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True + assert response.satisfies_pzi is True -def test_update_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_update_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = instance.Instance( + name="name_value", + display_name="display_name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - client.update_instance() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.update_instance(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() + assert args[0] == instance.Instance( + name="name_value", + display_name="display_name_value", + ) + + +def test_update_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_instance] = mock_rpc + request = {} + client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_instance + ] = mock_rpc + + request = {} + await client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1580,7 +2585,7 @@ async def test_update_instance_async( transport: str = "grpc_asyncio", request_type=instance.Instance ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1598,6 +2603,7 @@ async def test_update_instance_async( state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_instance(request) @@ -1605,7 +2611,8 @@ async def test_update_instance_async( # Establish that the underlying gRPC stub method was called. 
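    # (update_instance takes an instance.Instance directly rather than a
    # dedicated *Request message, as the comparison below shows.)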
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() + request = instance.Instance() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) @@ -1614,6 +2621,7 @@ async def test_update_instance_async( assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -1653,7 +2661,7 @@ def test_update_instance_field_headers(): @pytest.mark.asyncio async def test_update_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1708,37 +2716,138 @@ def test_partial_update_instance(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_partial_update_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_partial_update_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.partial_update_instance), "__call__" ) as call: - client.partial_update_instance() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.partial_update_instance(request=request) call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() +def test_partial_update_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.partial_update_instance + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[ + client._transport.partial_update_instance + ] = mock_rpc + request = {} + client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_partial_update_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.partial_update_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.partial_update_instance + ] = mock_rpc + + request = {} + await client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.partial_update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + @pytest.mark.asyncio async def test_partial_update_instance_async( transport: str = "grpc_asyncio", request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1759,7 +2868,8 @@ async def test_partial_update_instance_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
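    # (like create_instance, partial_update_instance resolves to an operation
    # future rather than a plain response message.)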
assert isinstance(response, future.Future) @@ -1804,7 +2914,7 @@ def test_partial_update_instance_field_headers(): @pytest.mark.asyncio async def test_partial_update_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1883,7 +2993,7 @@ def test_partial_update_instance_flattened_error(): @pytest.mark.asyncio async def test_partial_update_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1918,7 +3028,7 @@ async def test_partial_update_instance_flattened_async(): @pytest.mark.asyncio async def test_partial_update_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1957,39 +3067,129 @@ def test_delete_instance(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + request = bigtable_instance_admin.DeleteInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None -def test_delete_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_delete_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteInstanceRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - client.delete_instance() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_instance(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() - + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest( + name="name_value", + ) -@pytest.mark.asyncio -async def test_delete_instance_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.DeleteInstanceRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - # Everything is optional in proto3 as far as the runtime is concerned, +def test_delete_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_instance] = mock_rpc + request = {} + client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_instance + ] = mock_rpc + + request = {} + await client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
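    # (a message constructed with no fields set is still a valid proto3 request.)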
request = request_type() @@ -2002,7 +3202,8 @@ async def test_delete_instance_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + request = bigtable_instance_admin.DeleteInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -2045,7 +3246,7 @@ def test_delete_instance_field_headers(): @pytest.mark.asyncio async def test_delete_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2113,7 +3314,7 @@ def test_delete_instance_flattened_error(): @pytest.mark.asyncio async def test_delete_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2140,7 +3341,7 @@ async def test_delete_instance_flattened_async(): @pytest.mark.asyncio async def test_delete_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2178,26 +3379,128 @@ def test_create_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() + request = bigtable_instance_admin.CreateClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_create_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_create_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - client.create_cluster() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_cluster(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() + assert args[0] == bigtable_instance_admin.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + +def test_create_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc + request = {} + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_cluster + ] = mock_rpc + + request = {} + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2206,7 +3509,7 @@ async def test_create_cluster_async( request_type=bigtable_instance_admin.CreateClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2225,7 +3528,8 @@ async def test_create_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() + request = bigtable_instance_admin.CreateClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2268,7 +3572,7 @@ def test_create_cluster_field_headers(): @pytest.mark.asyncio async def test_create_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2348,7 +3652,7 @@ def test_create_cluster_flattened_error(): @pytest.mark.asyncio async def test_create_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2385,7 +3689,7 @@ async def test_create_cluster_flattened_async(): @pytest.mark.asyncio async def test_create_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2424,6 +3728,7 @@ def test_get_cluster(request_type, transport: str = "grpc"): location="location_value", state=instance.Cluster.State.READY, serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, default_storage_type=common.StorageType.SSD, ) response = client.get_cluster(request) @@ -2431,7 +3736,8 @@ def test_get_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() + request = bigtable_instance_admin.GetClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Cluster) @@ -2439,23 +3745,116 @@ def test_get_cluster(request_type, transport: str = "grpc"): assert response.location == "location_value" assert response.state == instance.Cluster.State.READY assert response.serve_nodes == 1181 + assert ( + response.node_scaling_factor + == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X + ) assert response.default_storage_type == common.StorageType.SSD -def test_get_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
+def test_get_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetClusterRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - client.get_cluster() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_cluster(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() + assert args[0] == bigtable_instance_admin.GetClusterRequest( + name="name_value", + ) + + +def test_get_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc + request = {} + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_cluster + ] = mock_rpc + + request = {} + await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
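+    # (mock_rpc is an AsyncMock; its call_count tracks each invocation just
+    # like a plain Mock.)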
+ assert mock_rpc.call_count == 1 + + await client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2464,7 +3863,7 @@ async def test_get_cluster_async( request_type=bigtable_instance_admin.GetClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2481,6 +3880,7 @@ async def test_get_cluster_async( location="location_value", state=instance.Cluster.State.READY, serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, default_storage_type=common.StorageType.SSD, ) ) @@ -2489,7 +3889,8 @@ async def test_get_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() + request = bigtable_instance_admin.GetClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Cluster) @@ -2497,6 +3898,10 @@ async def test_get_cluster_async( assert response.location == "location_value" assert response.state == instance.Cluster.State.READY assert response.serve_nodes == 1181 + assert ( + response.node_scaling_factor + == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X + ) assert response.default_storage_type == common.StorageType.SSD @@ -2537,7 +3942,7 @@ def test_get_cluster_field_headers(): @pytest.mark.asyncio async def test_get_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2605,7 +4010,7 @@ def test_get_cluster_flattened_error(): @pytest.mark.asyncio async def test_get_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2632,7 +4037,7 @@ async def test_get_cluster_flattened_async(): @pytest.mark.asyncio async def test_get_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2673,7 +4078,8 @@ def test_list_clusters(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() + request = bigtable_instance_admin.ListClustersRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response.raw_page is response @@ -2682,20 +4088,111 @@ def test_list_clusters(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_clusters_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
+def test_list_clusters_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListClustersRequest( + parent="parent_value", + page_token="page_token_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - client.list_clusters() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_clusters(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() + assert args[0] == bigtable_instance_admin.ListClustersRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_clusters_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_clusters in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc + request = {} + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_clusters_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_clusters + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_clusters + ] = mock_rpc + + request = {} + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
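+    # (no wrapper_fn activity is expected between the two calls; the
+    # assertions below confirm the cached wrapper was reused.)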
+ assert mock_rpc.call_count == 1 + + await client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2704,7 +4201,7 @@ async def test_list_clusters_async( request_type=bigtable_instance_admin.ListClustersRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2726,7 +4223,8 @@ async def test_list_clusters_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() + request = bigtable_instance_admin.ListClustersRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable_instance_admin.ListClustersResponse) @@ -2771,7 +4269,7 @@ def test_list_clusters_field_headers(): @pytest.mark.asyncio async def test_list_clusters_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2841,7 +4339,7 @@ def test_list_clusters_flattened_error(): @pytest.mark.asyncio async def test_list_clusters_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2870,7 +4368,7 @@ async def test_list_clusters_flattened_async(): @pytest.mark.asyncio async def test_list_clusters_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2908,26 +4406,128 @@ def test_update_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() + request = instance.Cluster() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_update_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = instance.Cluster( + name="name_value", + location="location_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - client.update_cluster() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.update_cluster(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() + assert args[0] == instance.Cluster( + name="name_value", + location="location_value", + ) + + +def test_update_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc + request = {} + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_cluster + ] = mock_rpc + + request = {} + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2935,7 +4535,7 @@ async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=instance.Cluster ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2954,7 +4554,8 @@ async def test_update_cluster_async( # Establish that the underlying gRPC stub method was called. 
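The `use_cached_wrapped_rpc` tests above and below all verify the same invariant: `wrap_method` runs once per RPC inside `_prep_wrapped_messages()` at client construction, and every later call looks the wrapper up in `_transport._wrapped_methods` instead of re-wrapping. A minimal sketch of that pattern, with a lambda standing in for the real gRPC stub (a hypothetical transport, not the generated one):

```python
from google.api_core.gapic_v1 import method


class SketchTransport:
    # Hypothetical stand-in for the generated transport; only the caching
    # pattern is the point here.
    def __init__(self):
        self.update_cluster = lambda request, **kwargs: "response"  # fake stub
        self._wrapped_methods = {}
        self._prep_wrapped_messages()

    def _prep_wrapped_messages(self):
        # wrap_method runs once per RPC, at construction time.
        self._wrapped_methods[self.update_cluster] = method.wrap_method(
            self.update_cluster, default_timeout=60.0
        )


transport = SketchTransport()
# Every call afterwards reuses the cached wrapper; no re-wrapping occurs.
assert transport._wrapped_methods[transport.update_cluster]({}) == "response"
```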
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() + request = instance.Cluster() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2997,7 +4598,7 @@ def test_update_cluster_field_headers(): @pytest.mark.asyncio async def test_update_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3054,40 +4655,141 @@ def test_partial_update_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + request = bigtable_instance_admin.PartialUpdateClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_partial_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_partial_update_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.PartialUpdateClusterRequest() + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.partial_update_cluster), "__call__" ) as call: - client.partial_update_cluster() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.partial_update_cluster(request=request) call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() -@pytest.mark.asyncio -async def test_partial_update_cluster_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - +def test_partial_update_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.partial_update_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[ + client._transport.partial_update_cluster + ] = mock_rpc + request = {} + client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_partial_update_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.partial_update_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.partial_update_cluster + ] = mock_rpc + + request = {} + await client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.partial_update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_partial_update_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() @@ -3105,7 +4807,8 @@ async def test_partial_update_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + request = bigtable_instance_admin.PartialUpdateClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
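The extra `wrapper_fn.reset_mock()` before the second call in the long-running-operation variants exists because, as the test comments note, the first LRO call lazily builds `client._transport.operations_client`, which wraps a few more methods exactly once. A sketch of that lazy, cached construction, assuming a hypothetical transport and using `functools.cached_property` purely for illustration:

```python
import functools


class SketchLroTransport:
    # Hypothetical transport; only the lazy build-and-cache step matters.
    def __init__(self):
        self.wrap_count = 0

    @functools.cached_property
    def operations_client(self):
        self.wrap_count += 1  # wrapping work happens here, exactly once
        return "operations-client"


t = SketchLroTransport()
t.operations_client
t.operations_client
assert t.wrap_count == 1  # the second access hits the cache
```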
assert isinstance(response, future.Future) @@ -3150,7 +4853,7 @@ def test_partial_update_cluster_field_headers(): @pytest.mark.asyncio async def test_partial_update_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3229,7 +4932,7 @@ def test_partial_update_cluster_flattened_error(): @pytest.mark.asyncio async def test_partial_update_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3264,7 +4967,7 @@ async def test_partial_update_cluster_flattened_async(): @pytest.mark.asyncio async def test_partial_update_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3303,26 +5006,116 @@ def test_delete_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + request = bigtable_instance_admin.DeleteClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None -def test_delete_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_delete_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteClusterRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - client.delete_cluster() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_cluster(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + assert args[0] == bigtable_instance_admin.DeleteClusterRequest( + name="name_value", + ) + + +def test_delete_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc + request = {} + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_cluster + ] = mock_rpc + + request = {} + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3331,7 +5124,7 @@ async def test_delete_cluster_async( request_type=bigtable_instance_admin.DeleteClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3348,7 +5141,8 @@ async def test_delete_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + request = bigtable_instance_admin.DeleteClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert response is None @@ -3391,7 +5185,7 @@ def test_delete_cluster_field_headers(): @pytest.mark.asyncio async def test_delete_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3459,7 +5253,7 @@ def test_delete_cluster_flattened_error(): @pytest.mark.asyncio async def test_delete_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3486,7 +5280,7 @@ async def test_delete_cluster_flattened_async(): @pytest.mark.asyncio async def test_delete_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3524,13 +5318,15 @@ def test_create_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) response = client.create_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + request = bigtable_instance_admin.CreateAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.AppProfile) @@ -3539,22 +5335,117 @@ def test_create_app_profile(request_type, transport: str = "grpc"): assert response.description == "description_value" -def test_create_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_create_app_profile_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_app_profile), "__call__" ) as call: - client.create_app_profile() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_app_profile(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + assert args[0] == bigtable_instance_admin.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + ) + + +def test_create_app_profile_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_app_profile + ] = mock_rpc + request = {} + client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_app_profile_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_app_profile + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_app_profile + ] = mock_rpc + + request = {} + await client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.create_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3563,7 +5454,7 @@ async def test_create_app_profile_async( request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3588,7 +5479,8 @@ async def test_create_app_profile_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + request = bigtable_instance_admin.CreateAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, instance.AppProfile) @@ -3636,7 +5528,7 @@ def test_create_app_profile_field_headers(): @pytest.mark.asyncio async def test_create_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3718,7 +5610,7 @@ def test_create_app_profile_flattened_error(): @pytest.mark.asyncio async def test_create_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3755,7 +5647,7 @@ async def test_create_app_profile_flattened_async(): @pytest.mark.asyncio async def test_create_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3793,13 +5685,15 @@ def test_get_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) response = client.get_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + request = bigtable_instance_admin.GetAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.AppProfile) @@ -3808,20 +5702,109 @@ def test_get_app_profile(request_type, transport: str = "grpc"): assert response.description == "description_value" -def test_get_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_get_app_profile_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetAppProfileRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - client.get_app_profile() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_app_profile(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + assert args[0] == bigtable_instance_admin.GetAppProfileRequest( + name="name_value", + ) + + +def test_get_app_profile_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_app_profile in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_app_profile] = mock_rpc + request = {} + client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_app_profile_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_app_profile + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_app_profile + ] = mock_rpc + + request = {} + await client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3830,7 +5813,7 @@ async def test_get_app_profile_async( request_type=bigtable_instance_admin.GetAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3853,7 +5836,8 @@ async def test_get_app_profile_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + request = bigtable_instance_admin.GetAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
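Throughout these hunks, async clients switch from `ga_credentials.AnonymousCredentials()` to `async_anonymous_credentials()`. The helper itself is defined earlier in the test module, outside this excerpt; a plausible reconstruction, assuming it falls back to the sync anonymous credentials when `google.auth.aio` is unavailable:

```python
from google.auth import credentials as ga_credentials

try:
    from google.auth.aio import credentials as ga_credentials_async

    HAS_GOOGLE_AUTH_AIO = True
except ImportError:  # older google-auth releases lack the aio package
    HAS_GOOGLE_AUTH_AIO = False


def async_anonymous_credentials():
    # Prefer native async credentials when available; the sync anonymous
    # credentials still suffice for these mocked-out tests otherwise.
    if HAS_GOOGLE_AUTH_AIO:
        return ga_credentials_async.AnonymousCredentials()
    return ga_credentials.AnonymousCredentials()
```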
assert isinstance(response, instance.AppProfile) @@ -3899,7 +5883,7 @@ def test_get_app_profile_field_headers(): @pytest.mark.asyncio async def test_get_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3967,7 +5951,7 @@ def test_get_app_profile_flattened_error(): @pytest.mark.asyncio async def test_get_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3994,7 +5978,7 @@ async def test_get_app_profile_flattened_async(): @pytest.mark.asyncio async def test_get_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4037,7 +6021,8 @@ def test_list_app_profiles(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + request = bigtable_instance_admin.ListAppProfilesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListAppProfilesPager) @@ -4045,22 +6030,115 @@ def test_list_app_profiles(request_type, transport: str = "grpc"): assert response.failed_locations == ["failed_locations_value"] -def test_list_app_profiles_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_list_app_profiles_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListAppProfilesRequest( + parent="parent_value", + page_token="page_token_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_app_profiles), "__call__" ) as call: - client.list_app_profiles() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_app_profiles(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + assert args[0] == bigtable_instance_admin.ListAppProfilesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_app_profiles_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_app_profiles in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_app_profiles + ] = mock_rpc + request = {} + client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_app_profiles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_app_profiles_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_app_profiles + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_app_profiles + ] = mock_rpc + + request = {} + await client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_app_profiles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4069,7 +6147,7 @@ async def test_list_app_profiles_async( request_type=bigtable_instance_admin.ListAppProfilesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4093,7 +6171,8 @@ async def test_list_app_profiles_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + request = bigtable_instance_admin.ListAppProfilesRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.ListAppProfilesAsyncPager) @@ -4140,7 +6219,7 @@ def test_list_app_profiles_field_headers(): @pytest.mark.asyncio async def test_list_app_profiles_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4214,7 +6293,7 @@ def test_list_app_profiles_flattened_error(): @pytest.mark.asyncio async def test_list_app_profiles_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4245,7 +6324,7 @@ async def test_list_app_profiles_flattened_async(): @pytest.mark.asyncio async def test_list_app_profiles_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4259,7 +6338,7 @@ async def test_list_app_profiles_flattened_error_async(): def test_list_app_profiles_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -4296,13 +6375,17 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_app_profiles(request={}) + pager = client.list_app_profiles(request={}, retry=retry, timeout=timeout) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -4311,7 +6394,7 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): def test_list_app_profiles_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -4355,7 +6438,7 @@ def test_list_app_profiles_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_app_profiles_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4407,7 +6490,7 @@ async def test_list_app_profiles_async_pager(): @pytest.mark.asyncio async def test_list_app_profiles_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4483,41 +6566,141 @@ def test_update_app_profile(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. 
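The reworked `test_list_app_profiles_pager` above now passes an explicit `retry` and `timeout` and asserts they are stored as `pager._retry` and `pager._timeout`, so subsequent page fetches reuse them rather than falling back to library defaults mid-iteration. A minimal sketch of a pager with that plumbing (illustrative names, not the real `pagers.ListAppProfilesPager`):

```python
from typing import Any, Callable, Optional


class SketchPager:
    def __init__(self, method: Callable[..., Any], request: dict,
                 retry: Optional[Any] = None, timeout: Optional[float] = None,
                 metadata: tuple = ()):
        self._method = method
        self._request = request
        self._retry = retry          # stored once, reused on every page
        self._timeout = timeout
        self._metadata = metadata

    def _fetch_next_page(self, page_token: str):
        self._request["page_token"] = page_token
        # Replay the caller's retry/timeout/metadata on each page fetch.
        return self._method(self._request, retry=self._retry,
                            timeout=self._timeout, metadata=self._metadata)


pager = SketchPager(lambda req, **kw: kw, request={}, timeout=5)
assert pager._fetch_next_page("tok")["timeout"] == 5
```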
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + request = bigtable_instance_admin.UpdateAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_update_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_update_app_profile_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.UpdateAppProfileRequest() + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_app_profile), "__call__" ) as call: - client.update_app_profile() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_app_profile(request=request) call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() -@pytest.mark.asyncio -async def test_update_app_profile_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.UpdateAppProfileRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, +def test_update_app_profile_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_app_profile + ] = mock_rpc + request = {} + client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_app_profile_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_app_profile + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_app_profile + ] = mock_rpc + + request = {} + await client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() @@ -4534,7 +6717,8 @@ async def test_update_app_profile_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + request = bigtable_instance_admin.UpdateAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -4579,7 +6763,7 @@ def test_update_app_profile_field_headers(): @pytest.mark.asyncio async def test_update_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4658,7 +6842,7 @@ def test_update_app_profile_flattened_error(): @pytest.mark.asyncio async def test_update_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4693,7 +6877,7 @@ async def test_update_app_profile_flattened_async(): @pytest.mark.asyncio async def test_update_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4734,28 +6918,122 @@ def test_delete_app_profile(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + request = bigtable_instance_admin.DeleteAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None -def test_delete_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_delete_app_profile_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteAppProfileRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_app_profile), "__call__" ) as call: - client.delete_app_profile() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_app_profile(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest( + name="name_value", + ) + + +def test_delete_app_profile_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_app_profile + ] = mock_rpc + request = {} + client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_app_profile_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_app_profile + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_app_profile + ] = mock_rpc + + request = {} + await client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4764,7 +7042,7 @@ async def test_delete_app_profile_async( request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4783,7 +7061,8 @@ async def test_delete_app_profile_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + request = bigtable_instance_admin.DeleteAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -4828,7 +7107,7 @@ def test_delete_app_profile_field_headers(): @pytest.mark.asyncio async def test_delete_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4872,6 +7151,7 @@ def test_delete_app_profile_flattened(): # using the keyword arguments to the method. client.delete_app_profile( name="name_value", + ignore_warnings=True, ) # Establish that the underlying call was made with the expected @@ -4881,6 +7161,9 @@ def test_delete_app_profile_flattened(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].ignore_warnings + mock_val = True + assert arg == mock_val def test_delete_app_profile_flattened_error(): @@ -4894,13 +7177,14 @@ def test_delete_app_profile_flattened_error(): client.delete_app_profile( bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + ignore_warnings=True, ) @pytest.mark.asyncio async def test_delete_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
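The flattened `delete_app_profile` tests in this region gain an `ignore_warnings=True` argument and assert it lands on the request as `args[0].ignore_warnings`. A simplified sketch of what the flattened overload does with such keyword arguments, assuming the real `DeleteAppProfileRequest` message:

```python
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin


def flattened_delete_app_profile(name: str, ignore_warnings: bool):
    # Simplified: the real client also rejects mixing a request object with
    # flattened arguments, then sends this message over the wrapped RPC.
    return bigtable_instance_admin.DeleteAppProfileRequest(
        name=name,
        ignore_warnings=ignore_warnings,
    )


req = flattened_delete_app_profile("name_value", ignore_warnings=True)
assert req.name == "name_value"
assert req.ignore_warnings is True
```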
@@ -4915,6 +7199,7 @@ async def test_delete_app_profile_flattened_async(): # using the keyword arguments to the method. response = await client.delete_app_profile( name="name_value", + ignore_warnings=True, ) # Establish that the underlying call was made with the expected @@ -4924,12 +7209,15 @@ async def test_delete_app_profile_flattened_async(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].ignore_warnings + mock_val = True + assert arg == mock_val @pytest.mark.asyncio async def test_delete_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4938,6 +7226,7 @@ async def test_delete_app_profile_flattened_error_async(): await client.delete_app_profile( bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + ignore_warnings=True, ) @@ -4970,7 +7259,8 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) @@ -4978,20 +7268,109 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): assert response.etag == b"etag_blob" -def test_get_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - client.get_iam_policy() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_iam_policy(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + +def test_get_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_iam_policy + ] = mock_rpc + + request = {} + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4999,7 +7378,7 @@ async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5021,7 +7400,8 @@ async def test_get_iam_policy_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, policy_pb2.Policy) @@ -5066,7 +7446,7 @@ def test_get_iam_policy_field_headers(): @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5151,7 +7531,7 @@ def test_get_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5178,7 +7558,7 @@ async def test_get_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5219,7 +7599,8 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) @@ -5227,20 +7608,109 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): assert response.etag == b"etag_blob" -def test_set_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - client.set_iam_policy() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.set_iam_policy(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + +def test_set_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.set_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.set_iam_policy + ] = mock_rpc + + request = {} + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5248,7 +7718,7 @@ async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5270,7 +7740,8 @@ async def test_set_iam_policy_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, policy_pb2.Policy) @@ -5315,7 +7786,7 @@ def test_set_iam_policy_field_headers(): @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5401,7 +7872,7 @@ def test_set_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5428,7 +7899,7 @@ async def test_set_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5470,29 +7941,123 @@ def test_test_iam_permissions(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) assert response.permissions == ["permissions_value"] -def test_test_iam_permissions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), "__call__" ) as call: - client.test_iam_permissions() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.test_iam_permissions(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + + +def test_test_iam_permissions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.test_iam_permissions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5501,7 +8066,7 @@ async def test_test_iam_permissions_async( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5524,7 +8089,8 @@ async def test_test_iam_permissions_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) @@ -5570,7 +8136,7 @@ def test_test_iam_permissions_field_headers(): @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5668,7 +8234,7 @@ def test_test_iam_permissions_flattened_error(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5703,7 +8269,7 @@ async def test_test_iam_permissions_flattened_async(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5744,38 +8310,132 @@ def test_list_hot_tablets(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + request = bigtable_instance_admin.ListHotTabletsRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListHotTabletsPager) assert response.next_page_token == "next_page_token_value" -def test_list_hot_tablets_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_list_hot_tablets_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListHotTabletsRequest( + parent="parent_value", + page_token="page_token_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - client.list_hot_tablets() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_hot_tablets(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + assert args[0] == bigtable_instance_admin.ListHotTabletsRequest( + parent="parent_value", + page_token="page_token_value", + ) -@pytest.mark.asyncio -async def test_list_hot_tablets_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.ListHotTabletsRequest, -): - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +def test_list_hot_tablets_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_hot_tablets in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_hot_tablets + ] = mock_rpc + request = {} + client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_hot_tablets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_hot_tablets_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_hot_tablets + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_hot_tablets + ] = mock_rpc + + request = {} + await client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_hot_tablets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_hot_tablets_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListHotTabletsRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
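[Editor's note] The `*_use_cached_wrapped_rpc` tests above (and their async variants) all exercise the same mechanism: the client wraps every transport method once at construction time, caches the result in `_transport._wrapped_methods`, and reuses that cached wrapper on each call. Below is a minimal, self-contained sketch of the behavior being verified — `FakeTransport` and `FakeClient` are hypothetical stand-ins, not the real generated classes.

```python
# Illustrative sketch only: wrap-once-then-reuse caching, as asserted by the
# *_use_cached_wrapped_rpc tests. FakeTransport/FakeClient are hypothetical.
from unittest import mock


def wrap_method(fn):
    # Stand-in for google.api_core.gapic_v1.method.wrap_method, which in the
    # real client layers retry/timeout/metadata handling around the stub call.
    def wrapped(request, **kwargs):
        return fn(request, **kwargs)
    return wrapped


class FakeTransport:
    def __init__(self):
        self.list_hot_tablets = lambda request, **kwargs: "response"
        # The _prep_wrapped_messages equivalent: wrap each stub method exactly
        # once, at construction time, and cache the result.
        self._wrapped_methods = {
            self.list_hot_tablets: wrap_method(self.list_hot_tablets),
        }


class FakeClient:
    def __init__(self):
        self._transport = FakeTransport()

    def list_hot_tablets(self, request):
        # Per-call path: look up the cached wrapper; never re-wrap.
        rpc = self._transport._wrapped_methods[self._transport.list_hot_tablets]
        return rpc(request)


client = FakeClient()
# Swap the cached wrapper for a mock, exactly as the tests above do.
mock_rpc = mock.Mock(return_value="response")
client._transport._wrapped_methods[client._transport.list_hot_tablets] = mock_rpc
assert client.list_hot_tablets({}) == "response"
client.list_hot_tablets({})
assert mock_rpc.call_count == 2  # both calls hit the single cached entry
```

The property the generated tests guard is the second half of this sketch: after client construction, `wrap_method` must never run again, which is why each test resets `wrapper_fn` and asserts `wrapper_fn.call_count == 0` on the repeat call while `mock_rpc.call_count` keeps climbing.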
@@ -5794,7 +8454,8 @@ async def test_list_hot_tablets_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + request = bigtable_instance_admin.ListHotTabletsRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListHotTabletsAsyncPager) @@ -5838,7 +8499,7 @@ def test_list_hot_tablets_field_headers(): @pytest.mark.asyncio async def test_list_hot_tablets_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5908,7 +8569,7 @@ def test_list_hot_tablets_flattened_error(): @pytest.mark.asyncio async def test_list_hot_tablets_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5937,7 +8598,7 @@ async def test_list_hot_tablets_flattened_async(): @pytest.mark.asyncio async def test_list_hot_tablets_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5951,7 +8612,7 @@ async def test_list_hot_tablets_flattened_error_async(): def test_list_hot_tablets_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -5986,13 +8647,17 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_hot_tablets(request={}) + pager = client.list_hot_tablets(request={}, retry=retry, timeout=timeout) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -6001,7 +8666,7 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): def test_list_hot_tablets_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -6043,7 +8708,7 @@ def test_list_hot_tablets_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_hot_tablets_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6093,7 +8758,7 @@ async def test_list_hot_tablets_async_pager(): @pytest.mark.asyncio async def test_list_hot_tablets_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
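[Editor's note] The pager hunk above threads `retry=retry, timeout=timeout` into the initial `list_hot_tablets` call and then asserts they are stored as `pager._retry`/`pager._timeout`; the point is that follow-up page fetches reuse the caller's retry and timeout settings rather than dropping them after page one. A hedged sketch of that plumbing — `SimplePager` and `fetch_page` are illustrative names, not part of the generated library:

```python
# Hedged sketch: retry/timeout supplied on the first call are stored on the
# pager and re-applied to every subsequent page request.
from typing import Callable, Iterator, Optional


class SimplePager:
    def __init__(self, method: Callable, request: dict,
                 retry: Optional[object] = None,
                 timeout: Optional[float] = None):
        self._method = method
        self._request = dict(request)
        self._retry = retry      # kept so later pages reuse the same policy
        self._timeout = timeout

    def __iter__(self) -> Iterator:
        while True:
            # Every page fetch forwards the originally supplied retry/timeout.
            page = self._method(self._request, retry=self._retry,
                                timeout=self._timeout)
            yield from page["items"]
            token = page.get("next_page_token")
            if not token:
                return
            self._request["page_token"] = token


pages = [
    {"items": [1, 2], "next_page_token": "t"},
    {"items": [3], "next_page_token": ""},
]
calls = []


def fetch_page(request, retry=None, timeout=None):
    calls.append((retry, timeout))
    return pages[len(calls) - 1]


pager = SimplePager(fetch_page, {}, retry="retry_policy", timeout=5)
assert list(pager) == [1, 2, 3]
assert calls == [("retry_policy", 5), ("retry_policy", 5)]  # reused on page 2
```

The second entry in `calls` is what storing `_retry`/`_timeout` buys: page two is fetched under the same policy the caller supplied for page one, which is exactly what the new `pager._retry == retry` and `pager._timeout == timeout` assertions pin down.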
@@ -6142,844 +8807,11933 @@ async def test_list_hot_tablets_async_pages(): @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.CreateInstanceRequest, + bigtable_instance_admin.CreateLogicalViewRequest, dict, ], ) -def test_create_instance_rest(request_type): +def test_create_logical_view(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_logical_view(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_instance(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.CreateLogicalViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - + assert isinstance(response, future.Future) -def test_create_instance_rest_required_fields( - request_type=bigtable_instance_admin.CreateInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request_init["parent"] = "" - request_init["instance_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +def test_create_logical_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - # verify fields with default values are dropped + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = bigtable_instance_admin.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_logical_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + ) - # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" - jsonified_request["instanceId"] = "instance_id_value" +def test_create_logical_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "instanceId" in jsonified_request - assert jsonified_request["instanceId"] == "instance_id_value" + # Ensure method has been cached + assert ( + client._transport.create_logical_view in client._transport._wrapped_methods + ) - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_logical_view + ] = mock_rpc + request = {} + client.create_logical_view(request) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + client.create_logical_view(request) - response = client.create_instance(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +@pytest.mark.asyncio +async def test_create_logical_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) -def test_create_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport.create_instance._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "instanceId", - "instance", - "clusters", - ) + # Ensure method has been cached + assert ( + client._client._transport.create_logical_view + in client._client._transport._wrapped_methods ) - ) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_logical_view + ] = mock_rpc -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), + request = {} + await client.create_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_logical_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateLogicalViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - client = BigtableInstanceAdminClient(transport=transport) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.CreateInstanceRequest.pb( - bigtable_instance_admin.CreateInstanceRequest() + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + response = await client.create_logical_view(request) - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.CreateLogicalViewRequest() + assert args[0] == request - request = bigtable_instance_admin.CreateInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) - client.create_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() +@pytest.mark.asyncio +async def test_create_logical_view_async_from_dict(): + await test_create_logical_view_async(request_type=dict) -def test_create_instance_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.CreateInstanceRequest -): +def test_create_logical_view_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateLogicalViewRequest() - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_instance(request) + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_logical_view(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_create_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_logical_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateLogicalViewRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1"} + request.parent = "parent_value" - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - mock_args.update(sample_request) + await client.create_logical_view(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - client.create_instance(**mock_args) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_logical_view_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_logical_view( + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].logical_view + mock_val = instance.LogicalView(name="name_value") + assert arg == mock_val + arg = args[0].logical_view_id + mock_val = "logical_view_id_value" + assert arg == mock_val -def test_create_instance_rest_flattened_error(transport: str = "rest"): +def test_create_logical_view_flattened_error(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_instance( - bigtable_instance_admin.CreateInstanceRequest(), + client.create_logical_view( + bigtable_instance_admin.CreateLogicalViewRequest(), parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", ) -def test_create_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" +@pytest.mark.asyncio +async def test_create_logical_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_logical_view( + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].logical_view + mock_val = instance.LogicalView(name="name_value") + assert arg == mock_val + arg = args[0].logical_view_id + mock_val = "logical_view_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_logical_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_logical_view( + bigtable_instance_admin.CreateLogicalViewRequest(), + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.GetInstanceRequest, + bigtable_instance_admin.GetLogicalViewRequest, dict, ], ) -def test_get_instance_rest(request_type): +def test_get_logical_view(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance( + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.LogicalView( name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, + query="query_value", + etag="etag_value", + deletion_protection=True, ) + response = client.get_logical_view(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_instance(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.GetLogicalViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) + assert isinstance(response, instance.LogicalView) assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True -def test_get_instance_rest_required_fields( - request_type=bigtable_instance_admin.GetInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport +def test_get_logical_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetLogicalViewRequest( + name="name_value", ) - # verify fields with default values are dropped + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_logical_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetLogicalViewRequest( + name="name_value", + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with default values are now present +def test_get_logical_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - jsonified_request["name"] = "name_value" + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Ensure method has been cached + assert client._transport.get_logical_view in client._transport._wrapped_methods - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_logical_view + ] = mock_rpc + request = {} + client.get_logical_view(request) - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + client.get_logical_view(request) - response_value = Response() - response_value.status_code = 200 + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.asyncio +async def test_get_logical_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response = client.get_instance(request) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Ensure method has been cached + assert ( + client._client._transport.get_logical_view + in client._client._transport._wrapped_methods + ) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_logical_view + ] = mock_rpc -def test_get_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + request = {} + await client.get_logical_view(request) - unset_fields = transport.get_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + await client.get_logical_view(request) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_logical_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetLogicalViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.GetInstanceRequest.pb( - bigtable_instance_admin.GetInstanceRequest() + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.LogicalView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, + ) ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + response = await client.get_logical_view(request) - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Instance.to_json(instance.Instance()) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.GetLogicalViewRequest() + assert args[0] == request - request = bigtable_instance_admin.GetInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Instance() + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.LogicalView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True - client.get_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() +@pytest.mark.asyncio +async def test_get_logical_view_async_from_dict(): + await test_get_logical_view_async(request_type=dict) -def test_get_instance_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.GetInstanceRequest -): +def test_get_logical_view_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetLogicalViewRequest() - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_instance(request) + request.name = "name_value" + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + call.return_value = instance.LogicalView() + client.get_logical_view(request) -def test_get_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_logical_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetLogicalViewRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2"} + request.name = "name_value" - # get truthy value for each flattened field - mock_args = dict( - name="name_value", + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            instance.LogicalView()
         )
-        mock_args.update(sample_request)
+        await client.get_logical_view(request)

-        # Wrap the value into a proper Response obj
-        response_value = Response()
-        response_value.status_code = 200
-        pb_return_value = instance.Instance.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-        response_value._content = json_return_value.encode("UTF-8")
-        req.return_value = response_value
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request

-    client.get_instance(**mock_args)
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=name_value",
+    ) in kw["metadata"]
+
+
+def test_get_logical_view_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = instance.LogicalView()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_logical_view(
+            name="name_value",
+        )

     # Establish that the underlying call was made with the expected
     # request object values.
-    assert len(req.mock_calls) == 1
-    _, args, _ = req.mock_calls[0]
-    assert path_template.validate(
-        "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1]
-    )
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    arg = args[0].name
+    mock_val = "name_value"
+    assert arg == mock_val

-def test_get_instance_rest_flattened_error(transport: str = "rest"):
+def test_get_logical_view_flattened_error():
     client = BigtableInstanceAdminClient(
         credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
     )

     # Attempting to call a method with both a request object and flattened
     # fields is an error.
     with pytest.raises(ValueError):
-        client.get_instance(
-            bigtable_instance_admin.GetInstanceRequest(),
+        client.get_logical_view(
+            bigtable_instance_admin.GetLogicalViewRequest(),
             name="name_value",
         )

-def test_get_instance_rest_error():
-    client = BigtableInstanceAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+@pytest.mark.asyncio
+async def test_get_logical_view_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            instance.LogicalView()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_logical_view(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_logical_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_logical_view( + bigtable_instance_admin.GetLogicalViewRequest(), + name="name_value", + ) + @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.ListInstancesRequest, + bigtable_instance_admin.ListLogicalViewsRequest, dict, ], ) -def test_list_instances_rest(request_type): +def test_list_logical_views(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListLogicalViewsResponse( next_page_token="next_page_token_value", ) + response = client.list_logical_views(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_instances(request) - - assert response.raw_page is response + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.ListLogicalViewsRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ["failed_locations_value"] + assert isinstance(response, pagers.ListLogicalViewsPager) assert response.next_page_token == "next_page_token_value" -def test_list_instances_rest_required_fields( - request_type=bigtable_instance_admin.ListInstancesRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport +def test_list_logical_views_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
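+    # (Auto-population applies only to eligible request_id-style fields that
+    # are left unset; fields set explicitly here must pass through unchanged.)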
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListLogicalViewsRequest( + parent="parent_value", + page_token="page_token_value", ) - # verify fields with default values are dropped + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_logical_views(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListLogicalViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_instances._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with default values are now present +def test_list_logical_views_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - jsonified_request["parent"] = "parent_value" + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_instances._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_token",)) - jsonified_request.update(unset_fields) + # Ensure method has been cached + assert ( + client._transport.list_logical_views in client._transport._wrapped_methods + ) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_logical_views + ] = mock_rpc + request = {} + client.list_logical_views(request) - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + client.list_logical_views(request) - response_value = Response() - response_value.status_code = 200 + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.asyncio +async def test_list_logical_views_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response = client.list_instances(request) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Ensure method has been cached + assert ( + client._client._transport.list_logical_views + in client._client._transport._wrapped_methods + ) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_logical_views + ] = mock_rpc -def test_list_instances_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + request = {} + await client.list_logical_views(request) - unset_fields = transport.list_instances._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) + # Establish that the underlying gRPC stub method was called. 
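+        # The cached wrapper entry was replaced with a mock above, so a
+        # nonzero call count proves the cached entry was used.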
+ assert mock_rpc.call_count == 1 + await client.list_logical_views(request) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_instances_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_logical_views_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListLogicalViewsRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_instances" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListInstancesRequest.pb( - bigtable_instance_admin.ListInstancesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_instance_admin.ListInstancesResponse.to_json( - bigtable_instance_admin.ListInstancesResponse() + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListLogicalViewsResponse( + next_page_token="next_page_token_value", ) ) + response = await client.list_logical_views(request) - request = bigtable_instance_admin.ListInstancesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListInstancesResponse() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.ListLogicalViewsRequest() + assert args[0] == request - client.list_instances( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) + # Establish that the response is the type that we expect. 
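+    # (List responses are wrapped in a pager, which lazily fetches any
+    # additional pages on iteration.)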
+ assert isinstance(response, pagers.ListLogicalViewsAsyncPager) + assert response.next_page_token == "next_page_token_value" - pre.assert_called_once() - post.assert_called_once() +@pytest.mark.asyncio +async def test_list_logical_views_async_from_dict(): + await test_list_logical_views_async(request_type=dict) -def test_list_instances_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.ListInstancesRequest -): + +def test_list_logical_views_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListLogicalViewsRequest() - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_instances(request) + request.parent = "parent_value" + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() + client.list_logical_views(request) -def test_list_instances_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_logical_views_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListLogicalViewsRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1"} + request.parent = "parent_value" - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+        type(client.transport.list_logical_views), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_instance_admin.ListLogicalViewsResponse()
         )
-        mock_args.update(sample_request)
+        await client.list_logical_views(request)

-        # Wrap the value into a proper Response obj
-        response_value = Response()
-        response_value.status_code = 200
-        pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-        response_value._content = json_return_value.encode("UTF-8")
-        req.return_value = response_value
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request

-    client.list_instances(**mock_args)
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "parent=parent_value",
+    ) in kw["metadata"]
+
+
+def test_list_logical_views_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_logical_views), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_instance_admin.ListLogicalViewsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_logical_views(
+            parent="parent_value",
+        )

     # Establish that the underlying call was made with the expected
     # request object values.
-    assert len(req.mock_calls) == 1
-    _, args, _ = req.mock_calls[0]
-    assert path_template.validate(
-        "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1]
-    )
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    arg = args[0].parent
+    mock_val = "parent_value"
+    assert arg == mock_val

-def test_list_instances_rest_flattened_error(transport: str = "rest"):
+def test_list_logical_views_flattened_error():
     client = BigtableInstanceAdminClient(
         credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
     )

     # Attempting to call a method with both a request object and flattened
     # fields is an error.
     with pytest.raises(ValueError):
-        client.list_instances(
-            bigtable_instance_admin.ListInstancesRequest(),
+        client.list_logical_views(
+            bigtable_instance_admin.ListLogicalViewsRequest(),
             parent="parent_value",
         )

-def test_list_instances_rest_error():
-    client = BigtableInstanceAdminClient(
-        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+@pytest.mark.asyncio
+async def test_list_logical_views_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
     )

+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_logical_views), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.

-@pytest.mark.parametrize(
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_instance_admin.ListLogicalViewsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
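+        # The flattened keyword arguments are merged into a request message
+        # by the client before the RPC is invoked.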
+ response = await client.list_logical_views( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_logical_views_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_logical_views( + bigtable_instance_admin.ListLogicalViewsRequest(), + parent="parent_value", + ) + + +def test_list_logical_views_pager(transport_name: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_logical_views(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.LogicalView) for i in results) + + +def test_list_logical_views_pages(transport_name: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Set the response to a series of pages. 
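+        # side_effect yields one response per call; the trailing RuntimeError
+        # fails the test if the pager requests more pages than were provided.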
+ call.side_effect = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + RuntimeError, + ) + pages = list(client.list_logical_views(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_logical_views_async_pager(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_logical_views( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, instance.LogicalView) for i in responses) + + +@pytest.mark.asyncio +async def test_list_logical_views_async_pages(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_logical_views(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateLogicalViewRequest, + dict, + ], +) +def test_update_logical_view(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.UpdateLogicalViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_logical_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.UpdateLogicalViewRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.update_logical_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.UpdateLogicalViewRequest() + + +def test_update_logical_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_logical_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_logical_view + ] = mock_rpc + request = {} + client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_logical_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_logical_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_logical_view + ] = mock_rpc + + request = {} + await client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_logical_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.UpdateLogicalViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.UpdateLogicalViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_logical_view_async_from_dict(): + await test_update_logical_view_async(request_type=dict) + + +def test_update_logical_view_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateLogicalViewRequest() + + request.logical_view.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "logical_view.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_logical_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateLogicalViewRequest() + + request.logical_view.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "logical_view.name=name_value", + ) in kw["metadata"] + + +def test_update_logical_view_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+    client.update_logical_view(
+        logical_view=instance.LogicalView(name="name_value"),
+        update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+    )
+
+    # Establish that the underlying call was made with the expected
+    # request object values.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    arg = args[0].logical_view
+    mock_val = instance.LogicalView(name="name_value")
+    assert arg == mock_val
+    arg = args[0].update_mask
+    mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+    assert arg == mock_val
+
+
+def test_update_logical_view_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_logical_view(
+            bigtable_instance_admin.UpdateLogicalViewRequest(),
+            logical_view=instance.LogicalView(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_logical_view_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.update_logical_view), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_logical_view(
+            logical_view=instance.LogicalView(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].logical_view
+        mock_val = instance.LogicalView(name="name_value")
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_logical_view_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_logical_view(
+            bigtable_instance_admin.UpdateLogicalViewRequest(),
+            logical_view=instance.LogicalView(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        bigtable_instance_admin.DeleteLogicalViewRequest,
+        dict,
+    ],
+)
+def test_delete_logical_view(request_type, transport: str = "grpc"):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.delete_logical_view), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
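+        # DeleteLogicalView returns google.protobuf.Empty, which the client
+        # surfaces as None.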
+ call.return_value = None + response = client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.DeleteLogicalViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_logical_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteLogicalViewRequest( + name="name_value", + etag="etag_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_logical_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteLogicalViewRequest( + name="name_value", + etag="etag_value", + ) + + +def test_delete_logical_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_logical_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_logical_view + ] = mock_rpc + request = {} + client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_logical_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_logical_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_logical_view + ] = mock_rpc + + request = {} + await client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_logical_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteLogicalViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.DeleteLogicalViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_logical_view_async_from_dict(): + await test_delete_logical_view_async(request_type=dict) + + +def test_delete_logical_view_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteLogicalViewRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + call.return_value = None + client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=name_value",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_logical_view_field_headers_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.DeleteLogicalViewRequest()
+
+    request.name = "name_value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.delete_logical_view), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.delete_logical_view(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=name_value",
+    ) in kw["metadata"]
+
+
+def test_delete_logical_view_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.delete_logical_view), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_logical_view(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = "name_value"
+        assert arg == mock_val
+
+
+def test_delete_logical_view_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_logical_view(
+            bigtable_instance_admin.DeleteLogicalViewRequest(),
+            name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_logical_view_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.delete_logical_view), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_logical_view(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = "name_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_logical_view_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.delete_logical_view( + bigtable_instance_admin.DeleteLogicalViewRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateMaterializedViewRequest, + dict, + ], +) +def test_create_materialized_view(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.CreateMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_materialized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_materialized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + ) + + +def test_create_materialized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[ + client._transport.create_materialized_view + ] = mock_rpc + request = {} + client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_materialized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_materialized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_materialized_view + ] = mock_rpc + + request = {} + await client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_materialized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateMaterializedViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.CreateMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
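+    # (Long-running methods return an operation future rather than the
+    # resource itself.)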
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_materialized_view_async_from_dict(): + await test_create_materialized_view_async(request_type=dict) + + +def test_create_materialized_view_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateMaterializedViewRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_materialized_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateMaterializedViewRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_materialized_view_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_materialized_view( + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
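+ # The flattened keyword arguments above should be coerced into the
+ # corresponding fields of the request proto; the assertions below check
+ # each field individually.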
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].materialized_view
+ mock_val = instance.MaterializedView(name="name_value")
+ assert arg == mock_val
+ arg = args[0].materialized_view_id
+ mock_val = "materialized_view_id_value"
+ assert arg == mock_val
+
+
+def test_create_materialized_view_flattened_error():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_materialized_view(
+ bigtable_instance_admin.CreateMaterializedViewRequest(),
+ parent="parent_value",
+ materialized_view=instance.MaterializedView(name="name_value"),
+ materialized_view_id="materialized_view_id_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_materialized_view_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_materialized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_materialized_view(
+ parent="parent_value",
+ materialized_view=instance.MaterializedView(name="name_value"),
+ materialized_view_id="materialized_view_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+ arg = args[0].materialized_view
+ mock_val = instance.MaterializedView(name="name_value")
+ assert arg == mock_val
+ arg = args[0].materialized_view_id
+ mock_val = "materialized_view_id_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_create_materialized_view_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_materialized_view(
+ bigtable_instance_admin.CreateMaterializedViewRequest(),
+ parent="parent_value",
+ materialized_view=instance.MaterializedView(name="name_value"),
+ materialized_view_id="materialized_view_id_value",
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_instance_admin.GetMaterializedViewRequest,
+ dict,
+ ],
+)
+def test_get_materialized_view(request_type, transport: str = "grpc"):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_materialized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = instance.MaterializedView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, + ) + response = client.get_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.GetMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.MaterializedView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True + + +def test_get_materialized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetMaterializedViewRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_materialized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetMaterializedViewRequest( + name="name_value", + ) + + +def test_get_materialized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_materialized_view + ] = mock_rpc + request = {} + client.get_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. 
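+ # Because the wrapped rpc is cached in _wrapped_methods, the mock installed
+ # above is what the client invokes; the second call below should reuse the
+ # same cached entry without re-wrapping.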
+ assert mock_rpc.call_count == 1 + + client.get_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_materialized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_materialized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_materialized_view + ] = mock_rpc + + request = {} + await client.get_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_materialized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetMaterializedViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.MaterializedView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, + ) + ) + response = await client.get_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.GetMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.MaterializedView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True + + +@pytest.mark.asyncio +async def test_get_materialized_view_async_from_dict(): + await test_get_materialized_view_async(request_type=dict) + + +def test_get_materialized_view_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetMaterializedViewRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
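+ # Note: patching "__call__" on the type of the transport's stub intercepts
+ # the RPC at the transport boundary, so request construction and metadata
+ # handling are exercised without any network I/O.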
+ with mock.patch.object(
+ type(client.transport.get_materialized_view), "__call__"
+ ) as call:
+ call.return_value = instance.MaterializedView()
+ client.get_materialized_view(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_materialized_view_field_headers_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = bigtable_instance_admin.GetMaterializedViewRequest()
+
+ request.name = "name_value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_materialized_view), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ instance.MaterializedView()
+ )
+ await client.get_materialized_view(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_get_materialized_view_flattened():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_materialized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = instance.MaterializedView()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_materialized_view(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_get_materialized_view_flattened_error():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_materialized_view(
+ bigtable_instance_admin.GetMaterializedViewRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_materialized_view_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_materialized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ instance.MaterializedView()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_materialized_view( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_materialized_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_materialized_view( + bigtable_instance_admin.GetMaterializedViewRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListMaterializedViewsRequest, + dict, + ], +) +def test_list_materialized_views(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.ListMaterializedViewsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMaterializedViewsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_materialized_views_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListMaterializedViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_materialized_views(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListMaterializedViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_materialized_views_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_materialized_views + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_materialized_views + ] = mock_rpc + request = {} + client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_materialized_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_materialized_views_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_materialized_views + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_materialized_views + ] = mock_rpc + + request = {} + await client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_materialized_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_materialized_views_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListMaterializedViewsRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
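+ # For grpc_asyncio transports the stub must return an awaitable, so the
+ # response is wrapped in FakeUnaryUnaryCall rather than assigned directly.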
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListMaterializedViewsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.ListMaterializedViewsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMaterializedViewsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_materialized_views_async_from_dict(): + await test_list_materialized_views_async(request_type=dict) + + +def test_list_materialized_views_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListMaterializedViewsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_materialized_views_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListMaterializedViewsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListMaterializedViewsResponse() + ) + await client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_materialized_views_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.list_materialized_views(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+def test_list_materialized_views_flattened_error():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_materialized_views(
+ bigtable_instance_admin.ListMaterializedViewsRequest(),
+ parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_materialized_views_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_materialized_views), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ bigtable_instance_admin.ListMaterializedViewsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_materialized_views(
+ parent="parent_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = "parent_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_materialized_views_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_materialized_views(
+ bigtable_instance_admin.ListMaterializedViewsRequest(),
+ parent="parent_value",
+ )
+
+
+def test_list_materialized_views_pager(transport_name: str = "grpc"):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_materialized_views), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
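+ # side_effect yields one response per underlying call, emulating successive
+ # page fetches; the trailing RuntimeError fails the test if the pager
+ # requests more pages than the fixture provides.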
+ call.side_effect = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_materialized_views(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.MaterializedView) for i in results) + + +def test_list_materialized_views_pages(transport_name: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + RuntimeError, + ) + pages = list(client.list_materialized_views(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_materialized_views_async_pager(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
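+ # Note: awaiting the method returns the async pager immediately; pages are
+ # only fetched as the pager is iterated with `async for` below.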
+ call.side_effect = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_materialized_views( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, instance.MaterializedView) for i in responses) + + +@pytest.mark.asyncio +async def test_list_materialized_views_async_pages(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_materialized_views(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateMaterializedViewRequest, + dict, + ], +) +def test_update_materialized_view(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_materialized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_materialized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.UpdateMaterializedViewRequest() + + +def test_update_materialized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_materialized_view + ] = mock_rpc + request = {} + client.update_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_materialized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_materialized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_materialized_view + ] = mock_rpc + + request = {} + await client.update_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_materialized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_materialized_view_async_from_dict(): + await test_update_materialized_view_async(request_type=dict) + + +def test_update_materialized_view_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_instance_admin.UpdateMaterializedViewRequest() + + request.materialized_view.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "materialized_view.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_materialized_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + + request.materialized_view.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "materialized_view.name=name_value", + ) in kw["metadata"] + + +def test_update_materialized_view_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_materialized_view( + materialized_view=instance.MaterializedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].materialized_view + mock_val = instance.MaterializedView(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_materialized_view_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
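+ # Mixing a request object with flattened keyword fields is ambiguous, so
+ # the client raises ValueError client-side, before any RPC is attempted.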
+ with pytest.raises(ValueError):
+ client.update_materialized_view(
+ bigtable_instance_admin.UpdateMaterializedViewRequest(),
+ materialized_view=instance.MaterializedView(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_materialized_view_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_materialized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_materialized_view(
+ materialized_view=instance.MaterializedView(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].materialized_view
+ mock_val = instance.MaterializedView(name="name_value")
+ assert arg == mock_val
+ arg = args[0].update_mask
+ mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_update_materialized_view_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_materialized_view(
+ bigtable_instance_admin.UpdateMaterializedViewRequest(),
+ materialized_view=instance.MaterializedView(name="name_value"),
+ update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+ )
+
+
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable_instance_admin.DeleteMaterializedViewRequest,
+ dict,
+ ],
+)
+def test_delete_materialized_view(request_type, transport: str = "grpc"):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_materialized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_materialized_view(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ request = bigtable_instance_admin.DeleteMaterializedViewRequest()
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_materialized_view_non_empty_request_with_auto_populated_field():
+ # This test is a coverage failsafe to make sure that UUID4 fields are
+ # automatically populated, according to AIP-4235, with non-empty requests.
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteMaterializedViewRequest( + name="name_value", + etag="etag_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_materialized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteMaterializedViewRequest( + name="name_value", + etag="etag_value", + ) + + +def test_delete_materialized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_materialized_view + ] = mock_rpc + request = {} + client.delete_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_materialized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_materialized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_materialized_view + ] = mock_rpc + + request = {} + await client.delete_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + await client.delete_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_materialized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteMaterializedViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.DeleteMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_materialized_view_async_from_dict(): + await test_delete_materialized_view_async(request_type=dict) + + +def test_delete_materialized_view_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteMaterializedViewRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + call.return_value = None + client.delete_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_materialized_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteMaterializedViewRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
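+ # The routing header is passed as request metadata, built roughly as
+ #   gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
+ # which yields the ("x-goog-request-params", "name=name_value") pair
+ # asserted below.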
+ _, _, kw = call.mock_calls[0]
+ assert (
+ "x-goog-request-params",
+ "name=name_value",
+ ) in kw["metadata"]
+
+
+def test_delete_materialized_view_flattened():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_materialized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_materialized_view(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+def test_delete_materialized_view_flattened_error():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_materialized_view(
+ bigtable_instance_admin.DeleteMaterializedViewRequest(),
+ name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_materialized_view_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_materialized_view), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_materialized_view(
+ name="name_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = "name_value"
+ assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_materialized_view_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=async_anonymous_credentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_materialized_view(
+ bigtable_instance_admin.DeleteMaterializedViewRequest(),
+ name="name_value",
+ )
+
+
+def test_create_instance_rest_use_cached_wrapped_rpc():
+ # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+ # instead of constructing them on each call
+ with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Should wrap all calls on client creation
+ assert wrapper_fn.call_count > 0
+ wrapper_fn.reset_mock()
+
+ # Ensure method has been cached
+ assert client._transport.create_instance in client._transport._wrapped_methods
+
+ # Replace cached wrapped function with mock
+ mock_rpc = mock.Mock()
+ mock_rpc.return_value.name = (
+ "foo" # operation_request.operation in compute client(s) expect a string.
+ ) + client._transport._wrapped_methods[client._transport.create_instance] = mock_rpc + + request = {} + client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_instance_rest_required_fields( + request_type=bigtable_instance_admin.CreateInstanceRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["instance_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["instanceId"] = "instance_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "instanceId" in jsonified_request + assert jsonified_request["instanceId"] == "instance_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_instance._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "instanceId", + "instance", + "clusters", + ) + ) + ) + + +def test_create_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] + ) + + +def test_create_instance_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + +def test_get_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc + + request = {} + client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_instance_rest_required_fields( + request_type=bigtable_instance_admin.GetInstanceRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] + ) + + +def test_get_instance_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_instance( + bigtable_instance_admin.GetInstanceRequest(), + name="name_value", + ) + + +def test_list_instances_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_instances in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_instances] = mock_rpc + + request = {} + client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_instances(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_instances_rest_required_fields( + request_type=bigtable_instance_admin.ListInstancesRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_instances._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_instances._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_token",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_instances(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_instances_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_instances._get_unset_required_fields({}) + assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) + + +def test_list_instances_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] + ) + + +def test_list_instances_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_instances( + bigtable_instance_admin.ListInstancesRequest(), + parent="parent_value", + ) + + +def test_update_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_instance] = mock_rpc + + request = {} + client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_instance_rest_required_fields(request_type=instance.Instance): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["display_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["displayName"] = "display_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "displayName" in jsonified_request + assert jsonified_request["displayName"] == "display_name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "put", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("displayName",))) + + +def test_partial_update_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.partial_update_instance + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.partial_update_instance + ] = mock_rpc + + request = {} + client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_partial_update_instance_rest_required_fields( + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_instance._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.partial_update_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_partial_update_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.partial_update_instance._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "instance", + "updateMask", + ) + ) + ) + + +def test_partial_update_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"instance": {"name": "projects/sample1/instances/sample2"}} + + # get truthy value for each flattened field + mock_args = dict( + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.partial_update_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{instance.name=projects/*/instances/*}" % client.transport._host, + args[1], + ) + + +def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_instance] = mock_rpc + + request = {} + client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_instance_rest_required_fields( + request_type=bigtable_instance_admin.DeleteInstanceRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_delete_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] + ) + + +def test_delete_instance_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), + name="name_value", + ) + + +def test_create_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc + + request = {} + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_cluster_rest_required_fields( + request_type=bigtable_instance_admin.CreateClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["cluster_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "clusterId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == request_init["cluster_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["clusterId"] = "cluster_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("cluster_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == "cluster_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_cluster(request) + + expected_params = [ + ( + "clusterId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("clusterId",)) + & set( + ( + "parent", + "clusterId", + "cluster", + ) + ) + ) + + +def test_create_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, + args[1], + ) + + +def test_create_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_cluster( + bigtable_instance_admin.CreateClusterRequest(), + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + +def test_get_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc + + request = {} + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_cluster_rest_required_fields( + request_type=bigtable_instance_admin.GetClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Cluster() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Cluster() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, + args[1], + ) + + +def test_get_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_cluster( + bigtable_instance_admin.GetClusterRequest(), + name="name_value", + ) + + +def test_list_clusters_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_clusters in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc + + request = {} + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_clusters_rest_required_fields( + request_type=bigtable_instance_admin.ListClustersRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_clusters._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_clusters._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_token",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_clusters(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_clusters_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_clusters._get_unset_required_fields({}) + assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) + + +def test_list_clusters_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_clusters(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, + args[1], + ) + + +def test_list_clusters_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_clusters( + bigtable_instance_admin.ListClustersRequest(), + parent="parent_value", + ) + + +def test_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc + + request = {} + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_partial_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.partial_update_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.partial_update_cluster + ] = mock_rpc + + request = {} + client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_partial_update_cluster_rest_required_fields( + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.partial_update_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_partial_update_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.partial_update_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "cluster", + "updateMask", + ) + ) + ) + + +def test_partial_update_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.partial_update_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{cluster.name=projects/*/instances/*/clusters/*}" + % client.transport._host, + args[1], + ) + + +def test_partial_update_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
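+    # Mixing a request object with flattened keyword arguments is ambiguous,
+    # so the client raises ValueError before any transport call is made.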
+ with pytest.raises(ValueError): + client.partial_update_cluster( + bigtable_instance_admin.PartialUpdateClusterRequest(), + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc + + request = {} + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_cluster_rest_required_fields( + request_type=bigtable_instance_admin.DeleteClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
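+            # DeleteCluster returns google.protobuf.Empty, so the faked HTTP
+            # response body below is an empty string rather than serialized JSON.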
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_delete_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, + args[1], + ) + + +def test_delete_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + bigtable_instance_admin.DeleteClusterRequest(), + name="name_value", + ) + + +def test_create_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[ + client._transport.create_app_profile + ] = mock_rpc + + request = {} + client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["app_profile_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "appProfileId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "appProfileId" in jsonified_request + assert jsonified_request["appProfileId"] == request_init["app_profile_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["appProfileId"] = "app_profile_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "app_profile_id", + "ignore_warnings", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "appProfileId" in jsonified_request + assert jsonified_request["appProfileId"] == "app_profile_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
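+            # CreateAppProfile is a POST with a body: the request proto is
+            # attached as transcode_result["body"] in addition to query_params.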
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_app_profile(request) + + expected_params = [ + ( + "appProfileId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "appProfileId", + "ignoreWarnings", + ) + ) + & set( + ( + "parent", + "appProfileId", + "appProfile", + ) + ) + ) + + +def test_create_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/appProfiles" + % client.transport._host, + args[1], + ) + + +def test_create_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + +def test_get_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_app_profile in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_app_profile] = mock_rpc + + request = {} + client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.GetAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
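+            # GetAppProfile is a plain GET, so no "body" key is set on the
+            # transcode result; the request rides entirely in the query params.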
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_app_profile(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_get_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_app_profile( + bigtable_instance_admin.GetAppProfileRequest(), + name="name_value", + ) + + +def test_list_app_profiles_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_app_profiles in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_app_profiles + ] = mock_rpc + + request = {} + client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_app_profiles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_app_profiles_rest_required_fields( + request_type=bigtable_instance_admin.ListAppProfilesRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_app_profiles._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_app_profiles._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
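+            # Only parent is required for ListAppProfiles; page_size and
+            # page_token are optional query params (checked just above).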
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_app_profiles(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_app_profiles_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_app_profiles._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_app_profiles_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_app_profiles(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/appProfiles" + % client.transport._host, + args[1], + ) + + +def test_list_app_profiles_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_app_profiles( + bigtable_instance_admin.ListAppProfilesRequest(), + parent="parent_value", + ) + + +def test_list_app_profiles_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
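+        # Unlike the tests above, this pager test leaves transcode() unmocked
+        # (the stub below is commented out, see the TODO), so each page request
+        # goes through the real transcoding path.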
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], + next_page_token="def", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListAppProfilesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_app_profiles(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.AppProfile) for i in results) + + pages = list(client.list_app_profiles(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_app_profile + ] = mock_rpc + + request = {} + client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
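+        # The extra wrapper_fn.reset_mock() below accounts for any additional
+        # wrapping the first call can trigger for operation-returning methods
+        # (e.g. around the operations client); only then is the no-new-wrapper
+        # assertion meaningful.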
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
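+            # UpdateAppProfile is a long-running method, so the faked response
+            # is an Operation message rather than the updated AppProfile itself.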
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_app_profile(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set( + ( + "appProfile", + "updateMask", + ) + ) + ) + + +def test_update_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_update_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.update_app_profile(
+            bigtable_instance_admin.UpdateAppProfileRequest(),
+            app_profile=instance.AppProfile(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+
+def test_delete_app_profile_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = BigtableInstanceAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert (
+            client._transport.delete_app_profile in client._transport._wrapped_methods
+        )
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[
+            client._transport.delete_app_profile
+        ] = mock_rpc
+
+        request = {}
+        client.delete_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        client.delete_app_profile(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+def test_delete_app_profile_rest_required_fields(
+    request_type=bigtable_instance_admin.DeleteAppProfileRequest,
+):
+    transport_class = transports.BigtableInstanceAdminRestTransport
+
+    request_init = {}
+    request_init["name"] = ""
+    request_init["ignore_warnings"] = False
+    request = request_type(**request_init)
+    pb_request = request_type.pb(request)
+    jsonified_request = json.loads(
+        json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+    )
+
+    # verify fields with default values are dropped
+    assert "ignoreWarnings" not in jsonified_request
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).delete_app_profile._get_unset_required_fields(jsonified_request)
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with default values are now present
+    assert "ignoreWarnings" in jsonified_request
+    assert jsonified_request["ignoreWarnings"] == request_init["ignore_warnings"]
+
+    jsonified_request["name"] = "name_value"
+    jsonified_request["ignoreWarnings"] = True
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).delete_app_profile._get_unset_required_fields(jsonified_request)
+    # Check that path parameters and body parameters are not mixing in.
+    assert not set(unset_fields) - set(("ignore_warnings",))
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with non-default values are left alone
+    assert "name" in jsonified_request
+    assert jsonified_request["name"] == "name_value"
+    assert "ignoreWarnings" in jsonified_request
+    assert jsonified_request["ignoreWarnings"] is True
+
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+    request = request_type(**request_init)
+
+    # Designate an appropriate value for the returned response.
+    return_value = None
+    # Mock the http request call within the method and fake a response.
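+    # For the expected_params assertion below: boolean query params are
+    # serialized as lowercase strings on the wire, hence str(False).lower().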
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_app_profile(request) + + expected_params = [ + ( + "ignoreWarnings", + str(False).lower(), + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("ignoreWarnings",)) + & set( + ( + "name", + "ignoreWarnings", + ) + ) + ) + + +def test_delete_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ignore_warnings=True, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_app_profile( + bigtable_instance_admin.DeleteAppProfileRequest(), + name="name_value", + ignore_warnings=True, + ) + + +def test_get_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
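+            # The IAM methods use raw protobuf requests (iam_policy_pb2), not
+            # proto-plus wrappers, so `request` is used directly here instead
+            # of request_type.pb(request).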
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("resource",))) + + +def test_get_iam_policy_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:getIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_set_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_set_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.SetIamPolicyRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.set_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_set_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.set_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "policy", + ) + ) + ) + + +def test_set_iam_policy_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:setIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_test_iam_permissions_rest_required_fields( + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request_init["permissions"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + jsonified_request["permissions"] = "permissions_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == "permissions_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.test_iam_permissions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_test_iam_permissions_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "permissions", + ) + ) + ) + + +def test_test_iam_permissions_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + permissions=["permissions_value"], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:testIamPermissions" + % client.transport._host, + args[1], + ) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_list_hot_tablets_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_hot_tablets in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_hot_tablets + ] = mock_rpc + + request = {} + client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_hot_tablets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_hot_tablets_rest_required_fields( + request_type=bigtable_instance_admin.ListHotTabletsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_hot_tablets._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_hot_tablets._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "end_time", + "page_size", + "page_token", + "start_time", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
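+ # request_type.pb() unwraps the proto-plus request wrapper into the
+ # underlying raw protobuf message, which is what the transcode and
+ # json_format layers operate on.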
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_hot_tablets(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_hot_tablets_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_hot_tablets._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "endTime", + "pageSize", + "pageToken", + "startTime", + ) + ) + & set(("parent",)) + ) + + +def test_list_hot_tablets_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_hot_tablets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" + % client.transport._host, + args[1], + ) + + +def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_hot_tablets( + bigtable_instance_admin.ListHotTabletsRequest(), + parent="parent_value", + ) + + +def test_list_hot_tablets_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
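+ # The fake paginated responses below cover a multi-item page, an
+ # empty page, and a final page with no next_page_token; the series is
+ # queued twice because the results are requested twice (once to
+ # iterate items, once to walk pages and check tokens).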
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], + next_page_token="def", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_hot_tablets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.HotTablet) for i in results) + + pages = list(client.list_hot_tablets(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_create_logical_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_logical_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_logical_view + ] = mock_rpc + + request = {} + client.create_logical_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_logical_view_rest_required_fields( + request_type=bigtable_instance_admin.CreateLogicalViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["logical_view_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "logicalViewId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "logicalViewId" in jsonified_request + assert jsonified_request["logicalViewId"] == request_init["logical_view_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["logicalViewId"] = "logical_view_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_logical_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("logical_view_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "logicalViewId" in jsonified_request + assert jsonified_request["logicalViewId"] == "logical_view_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
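+ # CreateLogicalView maps to a POST with a request body, so the
+ # stubbed transcode result below also carries a "body" entry.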
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_logical_view(request) + + expected_params = [ + ( + "logicalViewId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_logical_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_logical_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("logicalViewId",)) + & set( + ( + "parent", + "logicalViewId", + "logicalView", + ) + ) + ) + + +def test_create_logical_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_logical_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/logicalViews" + % client.transport._host, + args[1], + ) + + +def test_create_logical_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_logical_view( + bigtable_instance_admin.CreateLogicalViewRequest(), + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + + +def test_get_logical_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_logical_view in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_logical_view + ] = mock_rpc + + request = {} + client.get_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_logical_view_rest_required_fields( + request_type=bigtable_instance_admin.GetLogicalViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.LogicalView() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
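+ # GetLogicalView maps to a plain GET with no request body, so the
+ # stubbed transcode result below omits the "body" entry.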
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.LogicalView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_logical_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_logical_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_logical_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_logical_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.LogicalView() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.LogicalView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_logical_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/logicalViews/*}" + % client.transport._host, + args[1], + ) + + +def test_get_logical_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_logical_view( + bigtable_instance_admin.GetLogicalViewRequest(), + name="name_value", + ) + + +def test_list_logical_views_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_logical_views in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_logical_views + ] = mock_rpc + + request = {} + client.list_logical_views(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_logical_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_logical_views_rest_required_fields( + request_type=bigtable_instance_admin.ListLogicalViewsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_logical_views._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_logical_views._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListLogicalViewsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_logical_views(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_logical_views_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_logical_views._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_logical_views_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListLogicalViewsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_logical_views(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/logicalViews" + % client.transport._host, + args[1], + ) + + +def test_list_logical_views_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_logical_views( + bigtable_instance_admin.ListLogicalViewsRequest(), + parent="parent_value", + ) + + +def test_list_logical_views_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListLogicalViewsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_logical_views(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.LogicalView) for i in results) + + pages = list(client.list_logical_views(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_logical_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_logical_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_logical_view + ] = mock_rpc + + request = {} + client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_logical_view_rest_required_fields( + request_type=bigtable_instance_admin.UpdateLogicalViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_logical_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_logical_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_logical_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_logical_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask",)) & set(("logicalView",))) + + +def test_update_logical_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
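+ # Flattened-call test pattern: sample_request supplies values that
+ # satisfy the method's http rule, mock_args layers the flattened
+ # fields on top, and the final assertion validates the request URI
+ # against the expected path template.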
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "logical_view": { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + logical_view=instance.LogicalView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_logical_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{logical_view.name=projects/*/instances/*/logicalViews/*}" + % client.transport._host, + args[1], + ) + + +def test_update_logical_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_logical_view( + bigtable_instance_admin.UpdateLogicalViewRequest(), + logical_view=instance.LogicalView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_logical_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_logical_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_logical_view + ] = mock_rpc + + request = {} + client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_logical_view_rest_required_fields( + request_type=bigtable_instance_admin.DeleteLogicalViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_logical_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("etag",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_logical_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_logical_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_logical_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) + + +def test_delete_logical_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_logical_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/logicalViews/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_logical_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_logical_view( + bigtable_instance_admin.DeleteLogicalViewRequest(), + name="name_value", + ) + + +def test_create_materialized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_materialized_view + ] = mock_rpc + + request = {} + client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_materialized_view_rest_required_fields( + request_type=bigtable_instance_admin.CreateMaterializedViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["materialized_view_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "materializedViewId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "materializedViewId" in jsonified_request + assert ( + jsonified_request["materializedViewId"] == request_init["materialized_view_id"] + ) + + jsonified_request["parent"] = "parent_value" + jsonified_request["materializedViewId"] = "materialized_view_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_materialized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("materialized_view_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "materializedViewId" in jsonified_request + assert jsonified_request["materializedViewId"] == "materialized_view_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_materialized_view(request) + + expected_params = [ + ( + "materializedViewId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_materialized_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_materialized_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("materializedViewId",)) + & set( + ( + "parent", + "materializedViewId", + "materializedView", + ) + ) + ) + + +def test_create_materialized_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_materialized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/materializedViews" + % client.transport._host, + args[1], + ) + + +def test_create_materialized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_materialized_view( + bigtable_instance_admin.CreateMaterializedViewRequest(), + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", + ) + + +def test_get_materialized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_materialized_view + ] = mock_rpc + + request = {} + client.get_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_materialized_view_rest_required_fields( + request_type=bigtable_instance_admin.GetMaterializedViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.MaterializedView() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.MaterializedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_materialized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_materialized_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_materialized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_materialized_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.MaterializedView() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.MaterializedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_materialized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/materializedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_get_materialized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.get_materialized_view(
+            bigtable_instance_admin.GetMaterializedViewRequest(),
+            name="name_value",
+        )
+
+
+def test_list_materialized_views_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = BigtableInstanceAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert (
+            client._transport.list_materialized_views
+            in client._transport._wrapped_methods
+        )
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expects a string.
+        )
+        client._transport._wrapped_methods[
+            client._transport.list_materialized_views
+        ] = mock_rpc
+
+        request = {}
+        client.list_materialized_views(request)
+
+        # Establish that the underlying stub method was called.
+        assert mock_rpc.call_count == 1
+
+        client.list_materialized_views(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+def test_list_materialized_views_rest_required_fields(
+    request_type=bigtable_instance_admin.ListMaterializedViewsRequest,
+):
+    transport_class = transports.BigtableInstanceAdminRestTransport
+
+    request_init = {}
+    request_init["parent"] = ""
+    request = request_type(**request_init)
+    pb_request = request_type.pb(request)
+    jsonified_request = json.loads(
+        json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+    )
+
+    # verify fields with default values are dropped
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).list_materialized_views._get_unset_required_fields(jsonified_request)
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with default values are now present
+
+    jsonified_request["parent"] = "parent_value"
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).list_materialized_views._get_unset_required_fields(jsonified_request)
+    # Check that path parameters and body parameters are not mixing in.
+    assert not set(unset_fields) - set(
+        (
+            "page_size",
+            "page_token",
+        )
+    )
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with non-default values are left alone
+    assert "parent" in jsonified_request
+    assert jsonified_request["parent"] == "parent_value"
+
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+    request = request_type(**request_init)
+
+    # Designate an appropriate value for the returned response.
+    return_value = bigtable_instance_admin.ListMaterializedViewsResponse()
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, "request") as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
+        with mock.patch.object(path_template, "transcode") as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_materialized_views(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_materialized_views_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_materialized_views._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_materialized_views_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_materialized_views(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/materializedViews" + % client.transport._host, + args[1], + ) + + +def test_list_materialized_views_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_materialized_views( + bigtable_instance_admin.ListMaterializedViewsRequest(), + parent="parent_value", + ) + + +def test_list_materialized_views_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
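+    # Pagination is driven entirely by next_page_token: three tokened pages
+    # followed by a token-less page yield six items in total, and the page
+    # sequence is doubled so the items iterator and the .pages iterator each
+    # consume a fresh set of responses.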
+    with mock.patch.object(Session, "request") as req:
+        # TODO(kbandes): remove this mock unless there's a good reason for it.
+        # with mock.patch.object(path_template, 'transcode') as transcode:
+        # Set the response as a series of pages
+        response = (
+            bigtable_instance_admin.ListMaterializedViewsResponse(
+                materialized_views=[
+                    instance.MaterializedView(),
+                    instance.MaterializedView(),
+                    instance.MaterializedView(),
+                ],
+                next_page_token="abc",
+            ),
+            bigtable_instance_admin.ListMaterializedViewsResponse(
+                materialized_views=[],
+                next_page_token="def",
+            ),
+            bigtable_instance_admin.ListMaterializedViewsResponse(
+                materialized_views=[
+                    instance.MaterializedView(),
+                ],
+                next_page_token="ghi",
+            ),
+            bigtable_instance_admin.ListMaterializedViewsResponse(
+                materialized_views=[
+                    instance.MaterializedView(),
+                    instance.MaterializedView(),
+                ],
+            ),
+        )
+        # Two responses for two calls
+        response = response + response
+
+        # Wrap the values into proper Response objs
+        response = tuple(
+            bigtable_instance_admin.ListMaterializedViewsResponse.to_json(x)
+            for x in response
+        )
+        return_values = tuple(Response() for i in response)
+        for return_val, response_val in zip(return_values, response):
+            return_val._content = response_val.encode("UTF-8")
+            return_val.status_code = 200
+        req.side_effect = return_values
+
+        sample_request = {"parent": "projects/sample1/instances/sample2"}
+
+        pager = client.list_materialized_views(request=sample_request)
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, instance.MaterializedView) for i in results)
+
+        pages = list(client.list_materialized_views(request=sample_request).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_update_materialized_view_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = BigtableInstanceAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert (
+            client._transport.update_materialized_view
+            in client._transport._wrapped_methods
+        )
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expects a string.
+        )
+        client._transport._wrapped_methods[
+            client._transport.update_materialized_view
+        ] = mock_rpc
+
+        request = {}
+        client.update_materialized_view(request)
+
+        # Establish that the underlying stub method was called.
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_materialized_view_rest_required_fields( + request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_materialized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
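+            # Unlike the GET cases above, a PATCH call also transcodes a
+            # request body, which is attached to the stubbed result below.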
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_materialized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_materialized_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_materialized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask",)) & set(("materializedView",))) + + +def test_update_materialized_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "materialized_view": { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + materialized_view=instance.MaterializedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_materialized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{materialized_view.name=projects/*/instances/*/materializedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_update_materialized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.update_materialized_view(
+            bigtable_instance_admin.UpdateMaterializedViewRequest(),
+            materialized_view=instance.MaterializedView(name="name_value"),
+            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
+        )
+
+
+def test_delete_materialized_view_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = BigtableInstanceAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert (
+            client._transport.delete_materialized_view
+            in client._transport._wrapped_methods
+        )
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expects a string.
+        )
+        client._transport._wrapped_methods[
+            client._transport.delete_materialized_view
+        ] = mock_rpc
+
+        request = {}
+        client.delete_materialized_view(request)
+
+        # Establish that the underlying stub method was called.
+        assert mock_rpc.call_count == 1
+
+        client.delete_materialized_view(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+def test_delete_materialized_view_rest_required_fields(
+    request_type=bigtable_instance_admin.DeleteMaterializedViewRequest,
+):
+    transport_class = transports.BigtableInstanceAdminRestTransport
+
+    request_init = {}
+    request_init["name"] = ""
+    request = request_type(**request_init)
+    pb_request = request_type.pb(request)
+    jsonified_request = json.loads(
+        json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+    )
+
+    # verify fields with default values are dropped
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).delete_materialized_view._get_unset_required_fields(jsonified_request)
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with default values are now present
+
+    jsonified_request["name"] = "name_value"
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).delete_materialized_view._get_unset_required_fields(jsonified_request)
+    # Check that path parameters and body parameters are not mixing in.
+    assert not set(unset_fields) - set(("etag",))
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with non-default values are left alone
+    assert "name" in jsonified_request
+    assert jsonified_request["name"] == "name_value"
+
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+    request = request_type(**request_init)
+
+    # Designate an appropriate value for the returned response.
+    return_value = None
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, "request") as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
+        with mock.patch.object(path_template, "transcode") as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
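+            # DeleteMaterializedView returns google.protobuf.Empty, so the
+            # faked HTTP response further down carries an empty JSON body.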
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_materialized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_materialized_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_materialized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) + + +def test_delete_materialized_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_materialized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/materializedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_materialized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_materialized_view( + bigtable_instance_admin.DeleteMaterializedViewRequest(), + name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableInstanceAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + transports.BigtableInstanceAdminRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = BigtableInstanceAdminClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_instance(request=None) + + # Establish that the underlying stub method was called. 
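+        # (With request=None the client builds a default, empty request
+        # message, which is what the stub should receive as its first
+        # argument.)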
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = instance.Instance() + client.get_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_instances_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value = bigtable_instance_admin.ListInstancesResponse() + client.list_instances(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListInstancesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value = instance.Instance() + client.update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Instance() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_partial_update_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.partial_update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value = None + client.delete_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value = instance.Cluster() + client.get_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_clusters_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = bigtable_instance_admin.ListClustersResponse() + client.list_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Cluster() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_partial_update_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.partial_update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value = None + client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + call.return_value = instance.AppProfile() + client.create_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value = instance.AppProfile() + client.get_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_app_profiles_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListAppProfilesResponse() + client.list_app_profiles(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListAppProfilesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value = None + client.delete_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_hot_tablets_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + call.return_value = bigtable_instance_admin.ListHotTabletsResponse() + client.list_hot_tablets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListHotTabletsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_logical_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_logical_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + call.return_value = instance.LogicalView() + client.get_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_logical_views_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() + client.list_logical_views(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListLogicalViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_logical_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_logical_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + call.return_value = None + client.delete_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_materialized_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_materialized_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + call.return_value = instance.MaterializedView() + client.get_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_list_materialized_views_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + client.list_materialized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_materialized_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_materialized_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + call.return_value = None + client.delete_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest() + + assert args[0] == request_msg + + +def test_transport_kind_grpc_asyncio(): + transport = BigtableInstanceAdminAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_instance(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + satisfies_pzi=True, + ) + ) + await client.get_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_instances_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + await client.list_instances(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListInstancesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + satisfies_pzi=True, + ) + ) + await client.update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Instance() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_partial_update_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.partial_update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, + default_storage_type=common.StorageType.SSD, + ) + ) + await client.get_cluster(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_clusters_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + await client.list_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Cluster() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_partial_update_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.partial_update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
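+        # (FakeUnaryUnaryCall wraps the value so the mocked stub can be
+        # awaited; None stands in for the Empty response of delete_cluster.)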
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + ) + ) + await client.create_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + ) + ) + await client.get_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_app_profiles_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + ) + await client.list_app_profiles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListAppProfilesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_update_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_hot_tablets_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_hot_tablets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListHotTabletsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_logical_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_logical_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + # Designate an appropriate return value for the call. 
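+        # GetLogicalView is a plain unary method (no long-running operation),
+        # so the fake call wraps the LogicalView resource directly.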
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.LogicalView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, + ) + ) + await client.get_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_logical_views_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListLogicalViewsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_logical_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListLogicalViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_logical_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_logical_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_create_materialized_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_materialized_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.MaterializedView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, + ) + ) + await client.get_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_materialized_views_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListMaterializedViewsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_materialized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_materialized_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
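+        # UpdateMaterializedView is a long-running RPC, so the fake call wraps
+        # an operations_pb2.Operation rather than the resource itself.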
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_materialized_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest() + + assert args[0] == request_msg + + +def test_transport_kind_rest(): + transport = BigtableInstanceAdminClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_create_instance_rest_bad_request( + request_type=bigtable_instance_admin.CreateInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateInstanceRequest, + dict, + ], +) +def test_create_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
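+        # Over REST, the long-running operation arrives as a JSON-encoded
+        # operations_pb2.Operation in the HTTP response body.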
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_instance(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_create_instance_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateInstanceRequest.pb( + bigtable_instance_admin.CreateInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.CreateInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_instance_rest_bad_request( + request_type=bigtable_instance_admin.GetInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
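+    # A 400 status code and an empty JSON body are all the mocked session
+    # returns; that is enough for the client to raise core_exceptions.BadRequest.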
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetInstanceRequest, + dict, + ], +) +def test_get_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + satisfies_pzi=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_instance(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.Instance) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_get_instance_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetInstanceRequest.pb( + bigtable_instance_admin.GetInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.Instance.to_json(instance.Instance()) + req.return_value.content = return_value + + request = bigtable_instance_admin.GetInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Instance() + post_with_metadata.return_value = instance.Instance(), metadata + + client.get_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_instances_rest_bad_request( + request_type=bigtable_instance_admin.ListInstancesRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_instances(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListInstancesRequest, + dict, + ], +) +def test_list_instances_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_instances(request) + + assert response.raw_page is response + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + assert response.failed_locations == ["failed_locations_value"] + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_instances_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_instances" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_list_instances_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListInstancesRequest.pb( + bigtable_instance_admin.ListInstancesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListInstancesResponse.to_json( + bigtable_instance_admin.ListInstancesResponse() + ) + req.return_value.content = return_value + + request = bigtable_instance_admin.ListInstancesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListInstancesResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListInstancesResponse(), + metadata, + ) + + client.list_instances( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_update_instance_rest_bad_request(request_type=instance.Instance): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_instance(request) + + +@pytest.mark.parametrize( "request_type", [ instance.Instance, dict, ], ) -def test_update_instance_rest(request_type): +def test_update_instance_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -6995,16 +20749,19 @@ def test_update_instance_rest(request_type): state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance(request) # Establish that the response is the type that we expect. @@ -7014,91 +20771,7 @@ def test_update_instance_rest(request_type): assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True - - -def test_update_instance_rest_required_fields(request_type=instance.Instance): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["display_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["displayName"] = "display_name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "displayName" in jsonified_request - assert jsonified_request["displayName"] == "display_name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = instance.Instance() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "put", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.update_instance(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_update_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.update_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("displayName",))) + assert response.satisfies_pzi is True @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -7110,6 +20783,7 @@ def test_update_instance_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( @@ -7117,10 +20791,14 @@ def test_update_instance_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableInstanceAdminRestInterceptor, "post_update_instance" ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_update_instance_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = instance.Instance.pb(instance.Instance()) transcode.return_value = { "method": "post", @@ -7129,10 +20807,11 @@ def test_update_instance_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Instance.to_json(instance.Instance()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.Instance.to_json(instance.Instance()) + req.return_value.content = return_value request = instance.Instance() metadata = [ @@ -7141,6 +20820,7 @@ def test_update_instance_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = instance.Instance() + post_with_metadata.return_value = instance.Instance(), metadata client.update_instance( request, @@ -7152,18 +20832,17 @@ def test_update_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + 
post_with_metadata.assert_called_once() -def test_update_instance_rest_bad_request( - transport: str = "rest", request_type=instance.Instance +def test_partial_update_instance_rest_bad_request( + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -7171,17 +20850,14 @@ def test_update_instance_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.update_instance(request) - - -def test_update_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.partial_update_instance(request) @pytest.mark.parametrize( @@ -7191,10 +20867,9 @@ def test_update_instance_rest_error(): dict, ], ) -def test_partial_update_instance_rest(request_type): +def test_partial_update_instance_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -7207,115 +20882,96 @@ def test_partial_update_instance_rest(request_type): "labels": {}, "create_time": {"seconds": 751, "nanos": 543}, "satisfies_pzs": True, + "satisfies_pzi": True, + "tags": {}, } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.partial_update_instance(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_partial_update_instance_rest_required_fields( - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport + # The version of a generated dependency at test runtime may differ from the version used during generation. 
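+    # (The static sample request below may name subfields that the installed
+    # proto package does not define, so they are pruned before use.)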
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[ + "instance" + ] - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_instance._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - # verify required fields with non-default values are left alone + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["instance"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["instance"][field])): + del 
request_init["instance"][field][i][subfield] + else: + del request_init["instance"][field][subfield] request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.partial_update_instance(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_partial_update_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.partial_update_instance._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "instance", - "updateMask", - ) - ) - ) + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.partial_update_instance(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -7327,6 +20983,7 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( @@ -7336,10 +20993,14 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance" ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_partial_update_instance_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( bigtable_instance_admin.PartialUpdateInstanceRequest() ) @@ -7350,12 +21011,11 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value request = bigtable_instance_admin.PartialUpdateInstanceRequest() metadata = [ @@ -7364,6 +21024,7 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.partial_update_instance( request, @@ -7375,28 +21036,17 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_partial_update_instance_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +def test_delete_instance_rest_bad_request( + request_type=bigtable_instance_admin.DeleteInstanceRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} - request_init["instance"] = { - "name": "projects/sample1/instances/sample2", - "display_name": "display_name_value", - "state": 1, - "type_": 1, - "labels": {}, - "create_time": {"seconds": 751, "nanos": 543}, - "satisfies_pzs": True, - } + request_init = {"name": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -7404,73 +21054,14 @@ def test_partial_update_instance_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.partial_update_instance(request) - - -def test_partial_update_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"instance": {"name": "projects/sample1/instances/sample2"}} - - # get truthy value for each flattened field - mock_args = dict( - instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.partial_update_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{instance.name=projects/*/instances/*}" % client.transport._host, - args[1], - ) - - -def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.partial_update_instance( - bigtable_instance_admin.PartialUpdateInstanceRequest(), - instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_partial_update_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_instance(request) @pytest.mark.parametrize( @@ -7480,10 +21071,9 @@ def test_partial_update_instance_rest_error(): dict, ], ) -def test_delete_instance_rest(request_type): +def test_delete_instance_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -7496,104 +21086,217 @@ def test_delete_instance_rest(request_type): return_value = None # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance(request) # Establish that the response is the type that we expect. assert response is None -def test_delete_instance_rest_required_fields( - request_type=bigtable_instance_admin.DeleteInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb( + bigtable_instance_admin.DeleteInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # verify required fields with default values are now present + request = bigtable_instance_admin.DeleteInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + 
pre.return_value = request, metadata - jsonified_request["name"] = "name_value" + client.delete_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +def test_create_cluster_rest_bad_request( + request_type=bigtable_instance_admin.CreateClusterRequest, +): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_cluster(request) - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateClusterRequest, + dict, + ], +) +def test_create_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - response = client.delete_instance(request) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["cluster"] = { + "name": "name_value", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "node_scaling_factor": 1, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) -def test_delete_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_cluster(request) - unset_fields = transport.delete_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the response is the type that we expect. 
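The block above prunes sample-request subfields that the protobuf dependency installed at test time does not define (see googleapis/gapic-generator-python#1748). A self-contained sketch of that pruning step on a plain dict, with `prune_unknown_subfields` as a hypothetical condensation of the two loops:

```python
def prune_unknown_subfields(sample: dict, runtime_nested_fields: set) -> None:
    """Drop (field, subfield) pairs from `sample` that the runtime proto
    definition does not know about, mirroring the loops in the test."""
    for field, value in list(sample.items()):
        # Repeated fields hold a list of dicts; message fields hold one dict.
        entries = value if isinstance(value, list) else [value]
        for entry in entries:
            if isinstance(entry, dict):
                for subfield in list(entry):
                    if (field, subfield) not in runtime_nested_fields:
                        del entry[subfield]


sample = {"cluster_config": {"new_field": 1, "known_field": 2}}
prune_unknown_subfields(sample, {("cluster_config", "known_field")})
assert sample == {"cluster_config": {"known_field": 2}}
```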
+ json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_instance_rest_interceptors(null_interceptor): +def test_create_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -7601,16 +21304,26 @@ def test_delete_instance_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_create_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster" ) as pre: pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb( - bigtable_instance_admin.DeleteInstanceRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateClusterRequest.pb( + bigtable_instance_admin.CreateClusterRequest() ) transcode.return_value = { "method": "post", @@ -7619,18 +21332,22 @@ def test_delete_instance_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_instance_admin.DeleteInstanceRequest() + request = bigtable_instance_admin.CreateClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.delete_instance( + client.create_cluster( request, metadata=[ ("key", "val"), @@ -7639,18 +21356,18 @@ def test_delete_instance_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_delete_instance_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.DeleteInstanceRequest +def test_get_cluster_rest_bad_request( + request_type=bigtable_instance_admin.GetClusterRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
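All of these interceptor tests stub `path_template.transcode` so that no real URI transcoding runs; the stubbed return value is the dict shape the REST transport consumes. Condensed, with placeholder values:

```python
from unittest import mock

from google.api_core import path_template

with mock.patch.object(path_template, "transcode") as transcode:
    transcode.return_value = {
        "method": "post",     # HTTP verb
        "uri": "my_uri",      # transcoded request path
        "body": b"",          # serialized request body (placeholder)
        "query_params": {},   # leftover fields sent as query params
    }
```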
@@ -7658,238 +21375,71 @@ def test_delete_instance_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_instance(request) - - -def test_delete_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 + response_value = mock.Mock() json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() req.return_value = response_value - - client.delete_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] - ) - - -def test_delete_instance_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), - name="name_value", - ) - - -def test_delete_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_cluster(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.CreateClusterRequest, + bigtable_instance_admin.GetClusterRequest, dict, ], ) -def test_create_cluster_rest(request_type): +def test_get_cluster_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["cluster"] = { - "name": "name_value", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, + default_storage_type=common.StorageType.SSD, + ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_cluster(request) # Establish that the response is the type that we expect. 
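The rewritten success tests fake typed payloads by serializing through the protobuf runtime and attaching the bytes as `.content` (the old code set the private `._content`). A sketch of that conversion chain for `instance.Cluster`, with imports mirroring those used elsewhere in this test module:

```python
from unittest import mock

from google.protobuf import json_format

from google.cloud.bigtable_admin_v2.types import instance


def fake_cluster_response() -> mock.Mock:
    return_value = instance.Cluster(name="name_value")
    response = mock.Mock()
    response.status_code = 200
    # proto-plus message -> protobuf message -> JSON -> UTF-8 bytes
    pb = instance.Cluster.pb(return_value)
    response.content = json_format.MessageToJson(pb).encode("UTF-8")
    response.headers = {"header-1": "value-1", "header-2": "value-2"}
    return response
```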
- assert response.operation.name == "operations/spam" - - -def test_create_cluster_rest_required_fields( - request_type=bigtable_instance_admin.CreateClusterRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["cluster_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - assert "clusterId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "clusterId" in jsonified_request - assert jsonified_request["clusterId"] == request_init["cluster_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["clusterId"] = "cluster_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("cluster_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "clusterId" in jsonified_request - assert jsonified_request["clusterId"] == "cluster_id_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.create_cluster(request) - - expected_params = [ - ( - "clusterId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_cluster._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("clusterId",)) - & set( - ( - "parent", - "clusterId", - "cluster", - ) - ) + assert isinstance(response, instance.Cluster) + assert response.name == "name_value" + assert response.location == "location_value" + assert response.state == instance.Cluster.State.READY + assert response.serve_nodes == 1181 + assert ( + response.node_scaling_factor + == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X ) + assert response.default_storage_type == common.StorageType.SSD @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_cluster_rest_interceptors(null_interceptor): +def test_get_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -7897,21 +21447,24 @@ def test_create_cluster_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster" + transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster" + transports.BigtableInstanceAdminRestInterceptor, + "post_get_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.CreateClusterRequest.pb( - bigtable_instance_admin.CreateClusterRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetClusterRequest.pb( + bigtable_instance_admin.GetClusterRequest() ) transcode.return_value = { "method": "post", @@ -7920,22 +21473,22 @@ def test_create_cluster_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.Cluster.to_json(instance.Cluster()) + req.return_value.content = return_value - 
request = bigtable_instance_admin.CreateClusterRequest() + request = bigtable_instance_admin.GetClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = instance.Cluster() + post_with_metadata.return_value = instance.Cluster(), metadata - client.create_cluster( + client.get_cluster( request, metadata=[ ("key", "val"), @@ -7945,38 +21498,17 @@ def test_create_cluster_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_create_cluster_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.CreateClusterRequest +def test_list_clusters_rest_bad_request( + request_type=bigtable_instance_admin.ListClustersRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["cluster"] = { - "name": "name_value", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -7984,212 +21516,187 @@ def test_create_cluster_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_cluster(request) - - -def test_create_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.create_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
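Alongside the existing `pre_*` and `post_*` hooks, the regenerated interceptor tests also patch and assert a new `post_<method>_with_metadata` hook, which receives the decoded response together with the metadata and must return both. A hypothetical subclass sketch, with the hook signatures inferred from how the tests mock them rather than from the transport source:

```python
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    transports,
)


class LoggingInterceptor(transports.BigtableInstanceAdminRestInterceptor):
    def pre_get_cluster(self, request, metadata):
        # Runs before the HTTP call; may rewrite the request or metadata.
        return request, metadata

    def post_get_cluster_with_metadata(self, response, metadata):
        # Runs after the call with the decoded instance.Cluster and the
        # metadata; returns both, possibly modified.
        return response, metadata
```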
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, - args[1], - ) - - -def test_create_cluster_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_cluster( - bigtable_instance_admin.CreateClusterRequest(), - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), - ) - - -def test_create_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_clusters(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.GetClusterRequest, + bigtable_instance_admin.ListClustersRequest, dict, ], ) -def test_get_cluster_rest(request_type): +def test_list_clusters_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - default_storage_type=common.StorageType.SSD, + return_value = bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_clusters(request) - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Cluster) - assert response.name == "name_value" - assert response.location == "location_value" - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert response.default_storage_type == common.StorageType.SSD + assert response.raw_page is response + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + assert response.failed_locations == ["failed_locations_value"] + assert response.next_page_token == "next_page_token_value" -def test_get_cluster_rest_required_fields( - request_type=bigtable_instance_admin.GetClusterRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_clusters_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_list_clusters_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListClustersRequest.pb( + bigtable_instance_admin.ListClustersRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListClustersResponse.to_json( + bigtable_instance_admin.ListClustersResponse() + ) + req.return_value.content = return_value - # verify required fields with default values are now present + request = bigtable_instance_admin.ListClustersRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListClustersResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListClustersResponse(), + metadata, + ) - jsonified_request["name"] = "name_value" + client.list_clusters( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +def test_update_cluster_rest_bad_request(request_type=instance.Cluster): client = BigtableInstanceAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = instance.Cluster() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_cluster(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_cluster(request) +@pytest.mark.parametrize( + "request_type", + [ + instance.Cluster, + dict, + ], +) +def test_update_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") -def test_get_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_cluster(request) - unset_fields = transport.get_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_cluster_rest_interceptors(null_interceptor): +def test_update_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8197,20 +21704,25 @@ def test_get_cluster_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster" + transports.BigtableInstanceAdminRestInterceptor, + "post_update_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.GetClusterRequest.pb( - bigtable_instance_admin.GetClusterRequest() - ) + post_with_metadata.assert_not_called() + pb_message = instance.Cluster.pb(instance.Cluster()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -8218,20 +21730,22 @@ def test_get_cluster_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Cluster.to_json(instance.Cluster()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_instance_admin.GetClusterRequest() + request = instance.Cluster() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = instance.Cluster() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.get_cluster( + client.update_cluster( request, metadata=[ ("key", "val"), @@ -8241,18 +21755,19 @@ def test_get_cluster_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def 
test_get_cluster_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.GetClusterRequest +def test_partial_update_cluster_rest_bad_request( + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -8260,209 +21775,144 @@ def test_get_cluster_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_cluster(request) - - -def test_get_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Cluster() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.get_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, - args[1], - ) - - -def test_get_cluster_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), - name="name_value", - ) - - -def test_get_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.partial_update_cluster(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.ListClustersRequest, + bigtable_instance_admin.PartialUpdateClusterRequest, dict, ], ) -def test_list_clusters_rest(request_type): +def test_partial_update_cluster_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } + request_init["cluster"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "node_scaling_factor": 1, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[ + "cluster" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_clusters(request) - - assert response.raw_page is response + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.partial_update_cluster(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - -def test_list_clusters_rest_required_fields( - request_type=bigtable_instance_admin.ListClustersRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_clusters._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_clusters._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_token",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.list_clusters(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_clusters_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_clusters._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_clusters_rest_interceptors(null_interceptor): +def test_partial_update_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8470,19 +21920,26 @@ def test_list_clusters_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters" + transports.BigtableInstanceAdminRestInterceptor, + "post_partial_update_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.ListClustersRequest.pb( - bigtable_instance_admin.ListClustersRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb( + bigtable_instance_admin.PartialUpdateClusterRequest() ) transcode.return_value = { "method": "post", @@ -8491,24 +21948,22 @@ def test_list_clusters_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_instance_admin.ListClustersResponse.to_json( - bigtable_instance_admin.ListClustersResponse() - ) - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_instance_admin.ListClustersRequest() + request = bigtable_instance_admin.PartialUpdateClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = 
bigtable_instance_admin.ListClustersResponse() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.list_clusters( + client.partial_update_cluster( request, metadata=[ ("key", "val"), @@ -8518,18 +21973,17 @@ def test_list_clusters_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_list_clusters_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.ListClustersRequest +def test_delete_cluster_rest_bad_request( + request_type=bigtable_instance_admin.DeleteClusterRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -8537,85 +21991,26 @@ def test_list_clusters_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_clusters(request) - - -def test_list_clusters_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.list_clusters(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, - args[1], - ) - - -def test_list_clusters_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), - parent="parent_value", - ) - - -def test_list_clusters_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_cluster(request) @pytest.mark.parametrize( "request_type", [ - instance.Cluster, + bigtable_instance_admin.DeleteClusterRequest, dict, ], ) -def test_update_cluster_rest(request_type): +def test_delete_cluster_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -8625,23 +22020,23 @@ def test_update_cluster_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_cluster(request) # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_cluster_rest_interceptors(null_interceptor): +def test_delete_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8649,20 +22044,18 @@ def test_update_cluster_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster" + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = instance.Cluster.pb(instance.Cluster()) + pb_message = bigtable_instance_admin.DeleteClusterRequest.pb( + bigtable_instance_admin.DeleteClusterRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -8670,22 +22063,18 @@ def test_update_cluster_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = instance.Cluster() + request = bigtable_instance_admin.DeleteClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - client.update_cluster( + client.delete_cluster( request, metadata=[ ("key", "val"), @@ -8694,19 +22083,16 @@ def test_update_cluster_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() -def test_update_cluster_rest_bad_request( - transport: str = "rest", request_type=instance.Cluster +def test_create_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
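Every rewritten `*_rest_bad_request` test in these hunks converges on the same shape: the real `requests.Response` (and its private `_content` attribute) is gone, replaced by a bare `mock.Mock()` whose `status_code`, `json`, and `headers` are stubbed directly. A condensed sketch of that shared pattern, with `assert_rest_bad_request` and `method_name` as hypothetical stand-ins that are not part of the generated suite:

from unittest import mock

import pytest
from google.api_core import exceptions as core_exceptions
from requests.sessions import Session


def assert_rest_bad_request(client, method_name, request):
    # Patch the transport's underlying requests.Session and fake a 400;
    # the GAPIC REST layer should surface it as core_exceptions.BadRequest.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        response_value = mock.Mock()
        response_value.status_code = 400
        response_value.json = mock.Mock(return_value={})  # empty error body
        response_value.request = mock.Mock()
        response_value.headers = {"header-1": "value-1"}
        req.return_value = response_value
        getattr(client, method_name)(request)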
@@ -8714,168 +22100,148 @@ def test_update_cluster_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.update_cluster(request) - - -def test_update_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_app_profile(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.PartialUpdateClusterRequest, + bigtable_instance_admin.CreateAppProfileRequest, dict, ], ) -def test_partial_update_cluster_rest(request_type): +def test_create_app_profile_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - request_init["cluster"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["app_profile"] = { + "name": "name_value", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], + "row_affinity": {}, }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + "priority": 1, + "standard_isolation": {"priority": 1}, + "data_boost_isolation_read_only": {"compute_billing_owner": 1}, } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.partial_update_cluster(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_partial_update_cluster_rest_required_fields( - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[ + "app_profile" + ] - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) - response = client.partial_update_cluster(request) + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] + request = request_type(**request_init) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
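    # The pruning loops above rest on a single heuristic: raw protobuf (*_pb2)
    # message classes carry a DESCRIPTOR attribute, while proto-plus wrappers
    # expose their schema via `.meta.fields` instead. A condensed sketch of
    # that check (illustrative helper, not part of the generated test):
    #
    #     def iter_field_names(message_cls):
    #         if hasattr(message_cls, "DESCRIPTOR"):  # raw protobuf type
    #             return [f.name for f in message_cls.DESCRIPTOR.fields]
    #         return list(message_cls.meta.fields)    # proto-plus type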
+ return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 -def test_partial_update_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_app_profile(request) - unset_fields = transport.partial_update_cluster._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "cluster", - "updateMask", - ) - ) - ) + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.description == "description_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_cluster_rest_interceptors(null_interceptor): +def test_create_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8883,21 +22249,24 @@ def test_partial_update_cluster_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster" + transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster" + transports.BigtableInstanceAdminRestInterceptor, + "post_create_app_profile_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb( - bigtable_instance_admin.PartialUpdateClusterRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb( + bigtable_instance_admin.CreateAppProfileRequest() ) transcode.return_value = { "method": "post", @@ -8906,22 +22275,22 @@ def test_partial_update_cluster_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.content = return_value - request = bigtable_instance_admin.PartialUpdateClusterRequest() + request = 
bigtable_instance_admin.CreateAppProfileRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = instance.AppProfile() + post_with_metadata.return_value = instance.AppProfile(), metadata - client.partial_update_cluster( + client.create_app_profile( request, metadata=[ ("key", "val"), @@ -8931,41 +22300,17 @@ def test_partial_update_cluster_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_partial_update_cluster_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +def test_get_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.GetAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - request_init["cluster"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -8973,199 +22318,63 @@ def test_partial_update_cluster_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.partial_update_cluster(request) - - -def test_partial_update_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - - # get truthy value for each flattened field - mock_args = dict( - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.partial_update_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{cluster.name=projects/*/instances/*/clusters/*}" - % client.transport._host, - args[1], - ) - - -def test_partial_update_cluster_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.partial_update_cluster( - bigtable_instance_admin.PartialUpdateClusterRequest(), - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_partial_update_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_app_profile(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.DeleteClusterRequest, + bigtable_instance_admin.GetAppProfileRequest, dict, ], ) -def test_delete_cluster_rest(request_type): +def test_get_app_profile_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_cluster(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_cluster_rest_required_fields( - request_type=bigtable_instance_admin.DeleteClusterRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.delete_cluster(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) -def test_delete_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 - unset_fields = transport.delete_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_app_profile(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.description == "description_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_cluster_rest_interceptors(null_interceptor): +def test_get_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9173,16 +22382,24 @@ def test_delete_cluster_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster" + transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_get_app_profile_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile" ) as pre: pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteClusterRequest.pb( - bigtable_instance_admin.DeleteClusterRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetAppProfileRequest.pb( + bigtable_instance_admin.GetAppProfileRequest() ) transcode.return_value = { "method": "post", @@ -9191,18 +22408,22 @@ def test_delete_cluster_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.content = return_value - request = bigtable_instance_admin.DeleteClusterRequest() + request = bigtable_instance_admin.GetAppProfileRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = instance.AppProfile() + post_with_metadata.return_value = instance.AppProfile(), metadata - client.delete_cluster( + client.get_app_profile( request, metadata=[ ("key", "val"), @@ -9211,18 +22432,18 @@ def 
test_delete_cluster_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_delete_cluster_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.DeleteClusterRequest +def test_list_app_profiles_rest_bad_request( + request_type=bigtable_instance_admin.ListAppProfilesRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9230,251 +22451,280 @@ def test_delete_cluster_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_cluster(request) - - -def test_delete_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 + response_value = mock.Mock() json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() req.return_value = response_value - - client.delete_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, - args[1], - ) - - -def test_delete_cluster_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), - name="name_value", - ) - - -def test_delete_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_app_profiles(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.CreateAppProfileRequest, + bigtable_instance_admin.ListAppProfilesRequest, dict, ], ) -def test_create_app_profile_rest(request_type): +def test_list_app_profiles_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["app_profile"] = { - "name": "name_value", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", + return_value = bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_app_profiles(request) # Establish that the response is the type that we expect. 
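    # The success assertions that follow now target pagers.ListAppProfilesPager
    # rather than the raw response message: REST list methods wrap their
    # response in a pager that lazily re-issues the request with `page_token`
    # set until `next_page_token` comes back empty. Typical consumption
    # (sketch; each item is an instance.AppProfile, pages fetched on demand):
    #
    #     for app_profile in client.list_app_profiles(request):
    #         print(app_profile.name)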
- assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" + assert isinstance(response, pagers.ListAppProfilesPager) + assert response.next_page_token == "next_page_token_value" + assert response.failed_locations == ["failed_locations_value"] -def test_create_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.CreateAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_app_profiles_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) - request_init = {} - request_init["parent"] = "" - request_init["app_profile_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_list_app_profiles_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb( + bigtable_instance_admin.ListAppProfilesRequest() ) - ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - # verify fields with default values are dropped - assert "appProfileId" not in jsonified_request + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListAppProfilesResponse.to_json( + bigtable_instance_admin.ListAppProfilesResponse() + ) + req.return_value.content = return_value - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + request = bigtable_instance_admin.ListAppProfilesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListAppProfilesResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListAppProfilesResponse(), + metadata, + ) - # verify required fields with default values are now present - assert "appProfileId" in jsonified_request - assert jsonified_request["appProfileId"] == request_init["app_profile_id"] + client.list_app_profiles( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - jsonified_request["parent"] = "parent_value" - jsonified_request["appProfileId"] = "app_profile_id_value" + pre.assert_called_once() + post.assert_called_once() + 
post_with_metadata.assert_called_once() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "app_profile_id", - "ignore_warnings", - ) + +def test_update_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - jsonified_request.update(unset_fields) + # send a request that will satisfy transcoding + request_init = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_app_profile(request) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "appProfileId" in jsonified_request - assert jsonified_request["appProfileId"] == "app_profile_id_value" +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateAppProfileRequest, + dict, + ], +) +def test_update_app_profile_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
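    # transcode() is mocked throughout these tests (old and new) because the
    # synthetic requests carry placeholder values the real transcoder would
    # reject. The stubbed return value mirrors what the real function
    # produces -- a dict mapping the request onto a concrete HTTP call, e.g.
    #
    #     {"uri": "v1/sample_method", "method": "post",
    #      "query_params": pb_request, "body": pb_request}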
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # send a request that will satisfy transcoding + request_init = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + request_init["app_profile"] = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], + "row_affinity": {}, + }, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + "priority": 1, + "standard_isolation": {"priority": 1}, + "data_boost_isolation_read_only": {"compute_billing_owner": 1}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[ + "app_profile" + ] - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - response = client.create_app_profile(request) + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) - expected_params = [ - ( - "appProfileId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") -def test_create_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_app_profile(request) - unset_fields = transport.create_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "appProfileId", - "ignoreWarnings", - ) - ) - & set( - ( - "parent", - "appProfileId", - "appProfile", - ) - ) - ) + # Establish that the response is the type that we expect. 
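    # Note the thin assertion that follows: update_app_profile returns a
    # long-running operation, so the success test only re-serializes the
    # mocked Operation rather than checking individual response fields. (The
    # matching interceptor test below patches
    # operation.Operation._set_result_from_operation for the same reason:
    # resolving the LRO result is out of scope for these REST-layer tests.)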
+ json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_app_profile_rest_interceptors(null_interceptor): +def test_update_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9482,19 +22732,26 @@ def test_create_app_profile_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile" + transports.BigtableInstanceAdminRestInterceptor, + "post_update_app_profile_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb( - bigtable_instance_admin.CreateAppProfileRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb( + bigtable_instance_admin.UpdateAppProfileRequest() ) transcode.return_value = { "method": "post", @@ -9503,20 +22760,22 @@ def test_create_app_profile_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_instance_admin.CreateAppProfileRequest() + request = bigtable_instance_admin.UpdateAppProfileRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = instance.AppProfile() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_app_profile( + client.update_app_profile( request, metadata=[ ("key", "val"), @@ -9526,31 +22785,17 @@ def test_create_app_profile_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_create_app_profile_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.CreateAppProfileRequest, +def test_delete_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["app_profile"] = { - "name": "name_value", - "etag": "etag_value", - "description": "description_value", 
- "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - } + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9558,210 +22803,166 @@ def test_create_app_profile_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.create_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_app_profile(request) -def test_create_app_profile_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteAppProfileRequest, + dict, + ], +) +def test_delete_app_profile_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), - ) - mock_args.update(sample_request) + return_value = None # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_app_profile(request) - client.create_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/appProfiles" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. 
+ assert response is None -def test_create_app_profile_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_app_profile( - bigtable_instance_admin.CreateAppProfileRequest(), - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb( + bigtable_instance_admin.DeleteAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + request = bigtable_instance_admin.DeleteAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + -def test_create_app_profile_rest_error(): +def test_get_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_iam_policy(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.GetAppProfileRequest, + iam_policy_pb2.GetIamPolicyRequest, dict, ], ) -def test_get_app_profile_rest(request_type): +def test_get_iam_policy_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_iam_policy(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - -def test_get_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.GetAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.get_app_profile(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_app_profile_rest_interceptors(null_interceptor): +def test_get_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9769,20 +22970,23 @@ def test_get_app_profile_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile" + transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile" + transports.BigtableInstanceAdminRestInterceptor, + "post_get_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.GetAppProfileRequest.pb( - bigtable_instance_admin.GetAppProfileRequest() - ) + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.GetIamPolicyRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -9790,20 +22994,22 @@ def test_get_app_profile_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value - request = bigtable_instance_admin.GetAppProfileRequest() + request = iam_policy_pb2.GetIamPolicyRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = instance.AppProfile() + post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata - client.get_app_profile( + client.get_iam_policy( request, metadata=[ ("key", "val"), @@ -9813,18 
+23019,17 @@ def test_get_app_profile_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_app_profile_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.GetAppProfileRequest +def test_set_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.SetIamPolicyRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9832,225 +23037,180 @@ def test_get_app_profile_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.get_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.set_iam_policy(request) -def test_get_app_profile_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", ) - mock_args.update(sample_request) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.set_iam_policy(request) - client.get_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" -def test_get_app_profile_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_set_iam_policy_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), - name="name_value", + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_set_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.SetIamPolicyRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value + + request = iam_policy_pb2.SetIamPolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata + + client.set_iam_policy( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_app_profile_rest_error(): + +def test_test_iam_permissions_rest_bad_request( + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.test_iam_permissions(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.ListAppProfilesRequest, + iam_policy_pb2.TestIamPermissionsRequest, dict, ], ) -def test_list_app_profiles_rest(request_type): +def test_test_iam_permissions_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse( - next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_app_profiles(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.test_iam_permissions(request) # Establish that the response is the type that we expect. 
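The bad-request variants all take the mirror-image path: the mocked session returns status 400 plus a stubbed .json() payload, and the client call is expected to surface google.api_core.exceptions.BadRequest. A self-contained sketch of that contract, where BadRequest and call_api are toy stand-ins rather than the real library types:

import pytest
from unittest import mock

class BadRequest(Exception):
    """Toy stand-in for google.api_core.exceptions.BadRequest."""

def call_api(session):
    # Hypothetical client call: surface HTTP 400 as BadRequest.
    response = session.request("GET", "https://example.com/v2/resource")
    if response.status_code == 400:
        raise BadRequest(response.json().get("error", "bad request"))
    return response

session = mock.Mock()
response_value = mock.Mock()
response_value.status_code = 400
response_value.json = mock.Mock(return_value={})
response_value.request = mock.Mock()
session.request.return_value = response_value

with pytest.raises(BadRequest):
    call_api(session)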
- assert isinstance(response, pagers.ListAppProfilesPager) - assert response.next_page_token == "next_page_token_value" - assert response.failed_locations == ["failed_locations_value"] - - -def test_list_app_profiles_rest_required_fields( - request_type=bigtable_instance_admin.ListAppProfilesRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_app_profiles._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_app_profiles._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.list_app_profiles(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_app_profiles_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_app_profiles._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_app_profiles_rest_interceptors(null_interceptor): +def test_test_iam_permissions_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -10058,20 +23218,23 @@ def test_list_app_profiles_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles" + transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles" + transports.BigtableInstanceAdminRestInterceptor, + "post_test_iam_permissions_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb( - bigtable_instance_admin.ListAppProfilesRequest() - ) + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.TestIamPermissionsRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -10079,24 +23242,27 @@ def test_list_app_profiles_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_instance_admin.ListAppProfilesResponse.to_json( - bigtable_instance_admin.ListAppProfilesResponse() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson( + iam_policy_pb2.TestIamPermissionsResponse() ) + req.return_value.content = return_value - request = bigtable_instance_admin.ListAppProfilesRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - 
post.return_value = bigtable_instance_admin.ListAppProfilesResponse() + post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + post_with_metadata.return_value = ( + iam_policy_pb2.TestIamPermissionsResponse(), + metadata, + ) - client.list_app_profiles( + client.test_iam_permissions( request, metadata=[ ("key", "val"), @@ -10106,18 +23272,17 @@ def test_list_app_profiles_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_list_app_profiles_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.ListAppProfilesRequest +def test_list_hot_tablets_rest_bad_request( + request_type=bigtable_instance_admin.ListHotTabletsRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -10125,165 +23290,238 @@ def test_list_app_profiles_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.list_app_profiles(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_hot_tablets(request) -def test_list_app_profiles_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListHotTabletsRequest, + dict, + ], +) +def test_list_hot_tablets_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
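Stepping back from the diff: each interceptor test above asserts the same hook order, namely that pre_* may rewrite (request, metadata) before the HTTP call, post_* rewrites the decoded response, and the new post_*_with_metadata hook returns a (response, metadata) pair. A condensed sketch of that contract, where call_with_interceptors is a hypothetical driver, not the real transport code:

from unittest import mock

def call_with_interceptors(interceptor, request, metadata, send):
    # Mirror the hook order the tests assert:
    # pre -> HTTP call -> post -> post_with_metadata.
    request, metadata = interceptor.pre(request, metadata)
    raw = send(request, metadata)
    response = interceptor.post(raw)
    response, metadata = interceptor.post_with_metadata(response, metadata)
    return response

interceptor = mock.Mock()
interceptor.pre.return_value = ("request'", [("cephalopod", "squid")])
interceptor.post.return_value = "response"
interceptor.post_with_metadata.return_value = ("response'", [("cephalopod", "squid")])

result = call_with_interceptors(
    interceptor, "request", [("key", "val")], lambda r, m: "raw"
)
assert result == "response'"
interceptor.pre.assert_called_once()
interceptor.post.assert_called_once()
interceptor.post_with_metadata.assert_called_once()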
- return_value = bigtable_instance_admin.ListAppProfilesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", + return_value = bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", ) - mock_args.update(sample_request) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.list_app_profiles(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/appProfiles" - % client.transport._host, - args[1], - ) - -def test_list_app_profiles_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_hot_tablets(request) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), - parent="parent_value", - ) + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHotTabletsPager) + assert response.next_page_token == "next_page_token_value" -def test_list_app_profiles_rest_pager(transport: str = "rest"): - client = BigtableInstanceAdminClient( +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_hot_tablets_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
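The removed pager test continuing just below encodes the pagination contract these list methods follow: each response carries a next_page_token, and iteration stops once a page arrives without one. A dict-based sketch of that loop, with plain dicts standing in for the proto response messages:

def iterate_pages(fetch_page):
    # Yield pages until a page comes back without a next_page_token.
    token = ""
    while True:
        page = fetch_page(token)
        yield page
        token = page.get("next_page_token", "")
        if not token:
            break

pages_by_token = {
    "": {"app_profiles": ["a", "b", "c"], "next_page_token": "abc"},
    "abc": {"app_profiles": [], "next_page_token": "def"},
    "def": {"app_profiles": ["d"], "next_page_token": "ghi"},
    "ghi": {"app_profiles": ["e", "f"]},
}

items = [
    item
    for page in iterate_pages(pages_by_token.get)
    for item in page["app_profiles"]
]
assert items == ["a", "b", "c", "d", "e", "f"]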
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token="def", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_list_hot_tablets_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb( + bigtable_instance_admin.ListHotTabletsRequest() ) - # Two responses for two calls - response = response + response + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListAppProfilesResponse.to_json(x) for x in response + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListHotTabletsResponse.to_json( + bigtable_instance_admin.ListHotTabletsResponse() ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + req.return_value.content = return_value - sample_request = {"parent": "projects/sample1/instances/sample2"} + request = bigtable_instance_admin.ListHotTabletsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListHotTabletsResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListHotTabletsResponse(), + metadata, + ) - pager = client.list_app_profiles(request=sample_request) + client.list_hot_tablets( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.AppProfile) for i in results) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - pages = list(client.list_app_profiles(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + +def test_create_logical_view_rest_bad_request( + request_type=bigtable_instance_admin.CreateLogicalViewRequest, +): + client = BigtableInstanceAdminClient( + 
credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_logical_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.UpdateAppProfileRequest, + bigtable_instance_admin.CreateLogicalViewRequest, dict, ], ) -def test_update_app_profile_rest(request_type): +def test_create_logical_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } - request_init["app_profile"] = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3", + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["logical_view"] = { + "name": "name_value", + "query": "query_value", "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, + "deletion_protection": True, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateLogicalViewRequest.meta.fields[ + "logical_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["logical_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["logical_view"][field])): + del request_init["logical_view"][field][i][subfield] + else: + del request_init["logical_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -10292,120 +23530,156 @@ def test_update_app_profile_rest(request_type): return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_logical_view(request) # Establish that the response is the type that we expect. 
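The field-pruning preamble above guards against version skew: sample subfields that the installed dependency no longer defines are deleted before the request object is built. A condensed, dict-only sketch of the same idea (not the generated helper verbatim; for brevity it prunes every entry of a repeated field rather than only the first):

def prune_unknown_subfields(sample_message, runtime_nested_fields):
    # Drop any sample subfield the installed dependency no longer defines;
    # handles both repeated (list) and singular (dict) message fields.
    for field, value in list(sample_message.items()):
        entries = value if isinstance(value, list) else [value]
        for entry in entries:
            if isinstance(entry, dict):
                for subfield in list(entry):
                    if (field, subfield) not in runtime_nested_fields:
                        del entry[subfield]

sample = {
    "name": "name_value",
    "labels": [{"key": "k", "removed_field": 1}],
    "config": {"query": "q", "stale_option": True},
}
runtime = {("labels", "key"), ("config", "query")}
prune_unknown_subfields(sample, runtime)
assert sample == {
    "name": "name_value",
    "labels": [{"key": "k"}],
    "config": {"query": "q"},
}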
- assert response.operation.name == "operations/spam" - + json_return_value = json_format.MessageToJson(return_value) -def test_update_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.UpdateAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_logical_view_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_logical_view" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_create_logical_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_logical_view" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateLogicalViewRequest.pb( + bigtable_instance_admin.CreateLogicalViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - # verify required fields with default values are now present + request = bigtable_instance_admin.CreateLogicalViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
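Note that the LRO-style call-success tests here (create_logical_view and, later, update_logical_view) end by re-serializing return_value rather than asserting on the response, so the comparison the final comment promises never actually happens. A standalone sketch of the round trip with an explicit check restored, assuming a hypothetical call_lro helper in place of the real client method:

import json
from unittest import mock

def call_lro(session):
    # Hypothetical stand-in: a long-running method returns an Operation
    # payload; decode it and hand back the operation name.
    response = session.request("POST", "https://example.com/v2/views")
    payload = json.loads(response.content.decode("UTF-8"))
    return payload["name"]

session = mock.Mock()
response_value = mock.Mock()
response_value.status_code = 200
response_value.content = json.dumps({"name": "operations/spam"}).encode("UTF-8")
session.request.return_value = response_value

# An explicit check on the operation name, in the spirit of the assertion
# the older generated test made (response.operation.name == "operations/spam").
assert call_lro(session) == "operations/spam"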
- assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", + client.create_logical_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) - ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + +def test_get_logical_view_rest_bad_request( + request_type=bigtable_instance_admin.GetLogicalViewRequest, +): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.update_app_profile(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_logical_view(request) -def test_update_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetLogicalViewRequest, + dict, + ], +) +def test_get_logical_view_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - unset_fields = transport.update_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "ignoreWarnings", - "updateMask", - ) - ) - & set( - ( - "appProfile", - "updateMask", - ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.LogicalView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, ) - ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.LogicalView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_logical_view(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.LogicalView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_app_profile_rest_interceptors(null_interceptor): +def test_get_logical_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -10413,21 +23687,24 @@ def test_update_app_profile_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile" + transports.BigtableInstanceAdminRestInterceptor, "post_get_logical_view" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile" + transports.BigtableInstanceAdminRestInterceptor, + "post_get_logical_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_logical_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb( - bigtable_instance_admin.UpdateAppProfileRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetLogicalViewRequest.pb( + bigtable_instance_admin.GetLogicalViewRequest() ) transcode.return_value = { "method": "post", @@ -10436,22 +23713,22 @@ def test_update_app_profile_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.LogicalView.to_json(instance.LogicalView()) + req.return_value.content = return_value - request = bigtable_instance_admin.UpdateAppProfileRequest() + request = bigtable_instance_admin.GetLogicalViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = instance.LogicalView() + post_with_metadata.return_value = instance.LogicalView(), metadata - client.update_app_profile( + client.get_logical_view( request, metadata=[ ("key", "val"), @@ -10461,35 +23738,17 @@ def test_update_app_profile_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_update_app_profile_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.UpdateAppProfileRequest, +def test_list_logical_views_rest_bad_request( + request_type=bigtable_instance_admin.ListLogicalViewsRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = { - "app_profile": { - 
"name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } - request_init["app_profile"] = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - } + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -10497,224 +23756,268 @@ def test_update_app_profile_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_app_profile(request) - - -def test_update_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } - - # get truthy value for each flattened field - mock_args = dict( - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.update_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], - ) - - -def test_update_app_profile_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_app_profile( - bigtable_instance_admin.UpdateAppProfileRequest(), - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_update_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_logical_views(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.DeleteAppProfileRequest, + bigtable_instance_admin.ListLogicalViewsRequest, dict, ], ) -def test_delete_app_profile_rest(request_type): +def test_list_logical_views_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = bigtable_instance_admin.ListLogicalViewsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_logical_views(request) # Establish that the response is the type that we expect. 
- assert response is None - + assert isinstance(response, pagers.ListLogicalViewsPager) + assert response.next_page_token == "next_page_token_value" -def test_delete_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.DeleteAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request_init["name"] = "" - request_init["ignore_warnings"] = False - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_logical_views_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # verify fields with default values are dropped - assert "ignoreWarnings" not in jsonified_request + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_logical_views" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_list_logical_views_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_logical_views" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListLogicalViewsRequest.pb( + bigtable_instance_admin.ListLogicalViewsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListLogicalViewsResponse.to_json( + bigtable_instance_admin.ListLogicalViewsResponse() + ) + req.return_value.content = return_value - # verify required fields with default values are now present - assert "ignoreWarnings" in jsonified_request - assert jsonified_request["ignoreWarnings"] == request_init["ignore_warnings"] + request = bigtable_instance_admin.ListLogicalViewsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListLogicalViewsResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListLogicalViewsResponse(), + metadata, + ) - jsonified_request["name"] = "name_value" - jsonified_request["ignoreWarnings"] = True + client.list_logical_views( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("ignore_warnings",)) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "ignoreWarnings" in jsonified_request - assert jsonified_request["ignoreWarnings"] == True +def test_update_logical_view_rest_bad_request( + request_type=bigtable_instance_admin.UpdateLogicalViewRequest, +): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = { + "logical_view": { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" + } + } request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_logical_view(request) - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateLogicalViewRequest, + dict, + ], +) +def test_update_logical_view_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "logical_view": { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" + } + } + request_init["logical_view"] = { + "name": "projects/sample1/instances/sample2/logicalViews/sample3", + "query": "query_value", + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.UpdateLogicalViewRequest.meta.fields[ + "logical_view" + ] - response = client.delete_app_profile(request) + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - expected_params = [ - ( - "ignoreWarnings", - str(False).lower(), - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["logical_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["logical_view"][field])): + del request_init["logical_view"][field][i][subfield] + else: + del request_init["logical_view"][field][subfield] + request = request_type(**request_init) -def test_delete_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") - unset_fields = transport.delete_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("ignoreWarnings",)) - & set( - ( - "name", - "ignoreWarnings", - ) - ) - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_logical_view(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_app_profile_rest_interceptors(null_interceptor): +def test_update_logical_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -10722,16 +24025,26 @@ def test_delete_app_profile_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_logical_view" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_update_logical_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_logical_view" ) as pre: pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb( - bigtable_instance_admin.DeleteAppProfileRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.UpdateLogicalViewRequest.pb( + bigtable_instance_admin.UpdateLogicalViewRequest() ) transcode.return_value = { "method": "post", @@ -10740,18 +24053,22 @@ def test_delete_app_profile_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_instance_admin.DeleteAppProfileRequest() + request = bigtable_instance_admin.UpdateLogicalViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.delete_app_profile( + client.update_logical_view( request, metadata=[ ("key", "val"), @@ -10760,19 +24077,18 @@ def test_delete_app_profile_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_delete_app_profile_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.DeleteAppProfileRequest, 
+def test_delete_logical_view_rest_bad_request( + request_type=bigtable_instance_admin.DeleteLogicalViewRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -10780,206 +24096,236 @@ def test_delete_app_profile_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_app_profile(request) - - -def test_delete_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 + response_value = mock.Mock() json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() req.return_value = response_value - - client.delete_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), - name="name_value", - ) - - -def test_delete_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_logical_view(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.GetIamPolicyRequest, + bigtable_instance_admin.DeleteLogicalViewRequest, dict, ], ) -def test_get_iam_policy_rest(request_type): +def test_delete_logical_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) + return_value = None # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_iam_policy(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_logical_view(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - + assert response is None -def test_get_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.GetIamPolicyRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_logical_view_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_logical_view" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteLogicalViewRequest.pb( + bigtable_instance_admin.DeleteLogicalViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # verify required fields with default values are now present + request = bigtable_instance_admin.DeleteLogicalViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata - jsonified_request["resource"] = "resource_value" + client.delete_logical_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" +def test_create_materialized_view_rest_bad_request( + request_type=bigtable_instance_admin.CreateMaterializedViewRequest, +): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_materialized_view(request) - response_value = Response() - response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateMaterializedViewRequest, + dict, + ], +) +def test_create_materialized_view_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["materialized_view"] = { + "name": "name_value", + "query": "query_value", + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateMaterializedViewRequest.meta.fields[ + "materialized_view" + ] - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
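+        # proto-plus message classes expose their fields via `.meta.fields`, while
+        # vanilla protobuf classes expose them via `.DESCRIPTOR.fields`; the
+        # hasattr check below distinguishes the two.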
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - response = client.get_iam_policy(request) + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["materialized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["materialized_view"][field])): + del request_init["materialized_view"][field][i][subfield] + else: + del request_init["materialized_view"][field][subfield] + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") -def test_get_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_materialized_view(request) - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource",))) + # Establish that the response is the type that we expect. 
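+    # create_materialized_view returns a long-running Operation, so there is no
+    # unwrapped resource payload to assert on here.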
+ json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): +def test_create_materialized_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -10987,18 +24333,27 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_materialized_view" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy" + transports.BigtableInstanceAdminRestInterceptor, + "post_create_materialized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_materialized_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateMaterializedViewRequest.pb( + bigtable_instance_admin.CreateMaterializedViewRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -11006,20 +24361,22 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_instance_admin.CreateMaterializedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.get_iam_policy( + client.create_materialized_view( request, metadata=[ ("key", "val"), @@ -11029,18 +24386,19 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_iam_policy_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +def test_get_materialized_view_rest_bad_request( + request_type=bigtable_instance_admin.GetMaterializedViewRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } request = request_type(**request_init) # Mock the http request call within the 
method and fake a BadRequest error. @@ -11048,213 +24406,196 @@ def test_get_iam_policy_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_iam_policy(request) - - -def test_get_iam_policy_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.get_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:getIamPolicy" - % client.transport._host, - args[1], - ) - - -def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) - - -def test_get_iam_policy_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_materialized_view(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.SetIamPolicyRequest, + bigtable_instance_admin.GetMaterializedViewRequest, dict, ], ) -def test_set_iam_policy_rest(request_type): +def test_get_materialized_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", + return_value = instance.MaterializedView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = instance.MaterializedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.set_iam_policy(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_materialized_view(request) # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - + assert isinstance(response, instance.MaterializedView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True -def test_set_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.SetIamPolicyRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_materialized_view_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_materialized_view" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_get_materialized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_materialized_view" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetMaterializedViewRequest.pb( + bigtable_instance_admin.GetMaterializedViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.MaterializedView.to_json(instance.MaterializedView()) + req.return_value.content = return_value 
- # verify required fields with default values are now present + request = bigtable_instance_admin.GetMaterializedViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.MaterializedView() + post_with_metadata.return_value = instance.MaterializedView(), metadata - jsonified_request["resource"] = "resource_value" + client.get_materialized_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" +def test_list_materialized_views_rest_bad_request( + request_type=bigtable_instance_admin.ListMaterializedViewsRequest, +): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.set_iam_policy(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the http request call within the method and fake a BadRequest error. 
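+    # A 400 status from the mocked session is mapped by the transport to
+    # core_exceptions.BadRequest.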
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_materialized_views(request) -def test_set_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListMaterializedViewsRequest, + dict, + ], +) +def test_list_materialized_views_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "policy", - ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListMaterializedViewsResponse( + next_page_token="next_page_token_value", ) - ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_materialized_views(request) + + # Establish that the response is the type that we expect. 
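+    # The client wraps the raw ListMaterializedViewsResponse in a pager that can
+    # fetch subsequent pages on demand via next_page_token.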
+ assert isinstance(response, pagers.ListMaterializedViewsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): +def test_list_materialized_views_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -11262,18 +24603,25 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy" + transports.BigtableInstanceAdminRestInterceptor, "post_list_materialized_views" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy" + transports.BigtableInstanceAdminRestInterceptor, + "post_list_materialized_views_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_materialized_views" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListMaterializedViewsRequest.pb( + bigtable_instance_admin.ListMaterializedViewsRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -11281,20 +24629,27 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListMaterializedViewsResponse.to_json( + bigtable_instance_admin.ListMaterializedViewsResponse() + ) + req.return_value.content = return_value - request = iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_instance_admin.ListMaterializedViewsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() + post.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListMaterializedViewsResponse(), + metadata, + ) - client.set_iam_policy( + client.list_materialized_views( request, metadata=[ ("key", "val"), @@ -11304,18 +24659,21 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_set_iam_policy_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +def test_update_materialized_view_rest_bad_request( + request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = { + "materialized_view": { + 
"name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -11323,215 +24681,259 @@ def test_set_iam_policy_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.set_iam_policy(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_materialized_view(request) -def test_set_iam_policy_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateMaterializedViewRequest, + dict, + ], +) +def test_update_materialized_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = { + "materialized_view": { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + } + request_init["materialized_view"] = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3", + "query": "query_value", + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.UpdateMaterializedViewRequest.meta.fields[ + "materialized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["materialized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["materialized_view"][field])): + del request_init["materialized_view"][field][i][subfield] + else: + del request_init["materialized_view"][field][subfield] + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_materialized_view(request) - client.set_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:setIamPolicy" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) -def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_materialized_view_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) + client = BigtableInstanceAdminClient(transport=transport) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_materialized_view" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_update_materialized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_materialized_view" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.UpdateMaterializedViewRequest.pb( + bigtable_instance_admin.UpdateMaterializedViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.update_materialized_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + -def test_set_iam_policy_rest_error(): +def test_delete_materialized_view_rest_bad_request( + request_type=bigtable_instance_admin.DeleteMaterializedViewRequest, +): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
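+    # The error-mapping path reads the error payload via response.json(), so the
+    # mock below supplies a json() that returns an empty dict.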
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_materialized_view(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, + bigtable_instance_admin.DeleteMaterializedViewRequest, dict, ], ) -def test_test_iam_permissions_rest(request_type): +def test_delete_materialized_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) + return_value = None # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.test_iam_permissions(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] - - -def test_test_iam_permissions_rest_required_fields( - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - jsonified_request["permissions"] = "permissions_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == "permissions_value" - - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.test_iam_permissions(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_test_iam_permissions_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_materialized_view(request) - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "permissions", - ) - ) - ) + # Establish that the response is the type that we expect. 
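+    # DeleteMaterializedView returns an empty response (google.protobuf.Empty),
+    # which the client surfaces as None.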
+ assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): +def test_delete_materialized_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -11539,18 +24941,18 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): else transports.BigtableInstanceAdminRestInterceptor(), ) client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions" + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_materialized_view" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() + pb_message = bigtable_instance_admin.DeleteMaterializedViewRequest.pb( + bigtable_instance_admin.DeleteMaterializedViewRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -11558,22 +24960,18 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - iam_policy_pb2.TestIamPermissionsResponse() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_instance_admin.DeleteMaterializedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions( + client.delete_materialized_view( request, metadata=[ ("key", "val"), @@ -11582,557 +24980,682 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() -def test_test_iam_permissions_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest -): +def test_initialize_client_w_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_instance_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + client.create_instance(request=None) - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.test_iam_permissions(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateInstanceRequest() + assert args[0] == request_msg -def test_test_iam_permissions_rest_flattened(): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_instance_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + client.get_instance(request=None) - # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetInstanceRequest() - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - permissions=["permissions_value"], - ) - mock_args.update(sample_request) + assert args[0] == request_msg - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.test_iam_permissions(**mock_args) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_instances_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:testIamPermissions" - % client.transport._host, - args[1], - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + client.list_instances(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListInstancesRequest() + assert args[0] == request_msg -def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_update_instance_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + client.update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Instance() + assert args[0] == request_msg -def test_test_iam_permissions_rest_error(): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_partial_update_instance_empty_call_rest(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + client.partial_update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListHotTabletsRequest, - dict, - ], -) -def test_list_hot_tablets_rest(request_type): + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_instance_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + client.delete_instance(request=None) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteInstanceRequest() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_hot_tablets(request) - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListHotTabletsPager) - assert response.next_page_token == "next_page_token_value" +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + client.create_cluster(request=None) -def test_list_hot_tablets_rest_required_fields( - request_type=bigtable_instance_admin.ListHotTabletsRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateClusterRequest() - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # verify fields with default values are dropped + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + client.get_cluster(request=None) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_hot_tablets._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetClusterRequest() - # verify required fields with default values are now present + assert args[0] == request_msg - jsonified_request["parent"] = "parent_value" - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_hot_tablets._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "end_time", - "page_size", - "page_token", - "start_time", - ) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_clusters_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + client.list_clusters(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Cluster() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_partial_update_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + client.partial_update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_app_profile_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + client.create_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateAppProfileRequest() + + assert args[0] == request_msg + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_app_profile_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + client.get_app_profile(request=None) - response_value = Response() - response_value.status_code = 200 + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetAppProfileRequest() - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_hot_tablets(request) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_app_profiles_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + client.list_app_profiles(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListAppProfilesRequest() -def test_list_hot_tablets_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + assert args[0] == request_msg - unset_fields = transport.list_hot_tablets._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "endTime", - "pageSize", - "pageToken", - "startTime", - ) - ) - & set(("parent",)) + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_app_profile_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + client.update_app_profile(request=None) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_hot_tablets_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_delete_app_profile_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), + transport="rest", ) - client = BigtableInstanceAdminClient(transport=transport) + + # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb( - bigtable_instance_admin.ListHotTabletsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + type(client.transport.delete_app_profile), "__call__" + ) as call: + client.delete_app_profile(request=None) - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_instance_admin.ListHotTabletsResponse.to_json( - bigtable_instance_admin.ListHotTabletsResponse() - ) - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteAppProfileRequest() - request = bigtable_instance_admin.ListHotTabletsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListHotTabletsResponse() + assert args[0] == request_msg - client.list_hot_tablets( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy(request=None) -def test_list_hot_tablets_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.ListHotTabletsRequest -): + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy(request=None) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_hot_tablets(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request_msg -def test_list_hot_tablets_rest_flattened(): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions(request=None) - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) + assert args[0] == request_msg - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.list_hot_tablets(**mock_args) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_hot_tablets_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" - % client.transport._host, - args[1], - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + client.list_hot_tablets(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListHotTabletsRequest() + assert args[0] == request_msg -def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_logical_view_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent="parent_value", - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + client.create_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateLogicalViewRequest() + assert args[0] == request_msg -def test_list_hot_tablets_rest_pager(transport: str = "rest"): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_logical_view_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token="def", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + client.get_logical_view(request=None) - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetLogicalViewRequest() - pager = client.list_hot_tablets(request=sample_request) + assert args[0] == request_msg - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.HotTablet) for i in results) - pages = list(client.list_hot_tablets(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_logical_views_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + client.list_logical_views(request=None) -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListLogicalViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_logical_view_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + client.update_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_logical_view_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + client.delete_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_create_materialized_view_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, - transport=transport, - ) - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + client.create_materialized_view(request=None) - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_materialized_view_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + client.get_materialized_view(request=None) -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_materialized_views_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - client = BigtableInstanceAdminClient(transport=transport) - assert client.transport is transport + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + client.list_materialized_views(request=None) -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_update_materialized_view_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel - transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + client.update_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_materialized_view_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + client.delete_materialized_view(request=None) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - transports.BigtableInstanceAdminRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest() + assert args[0] == request_msg -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - "rest", - ], -) -def test_transport_kind(transport_name): - transport = BigtableInstanceAdminClient.get_transport_class(transport_name)( + +def test_bigtable_instance_admin_rest_lro_client(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have an api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, ) - assert transport.kind == transport_name + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client def test_transport_grpc_default(): @@ -12189,6 +25712,16 @@ def test_bigtable_instance_admin_base_transport(): "set_iam_policy", "test_iam_permissions", "list_hot_tablets", + "create_logical_view", + "get_logical_view", + "list_logical_views", + "update_logical_view", + "delete_logical_view", + "create_materialized_view", + "get_materialized_view", + "list_materialized_views", + "update_materialized_view", + "delete_materialized_view", ) for method in methods: with pytest.raises(NotImplementedError): @@ -12425,23 +25958,6 @@ def test_bigtable_instance_admin_http_transport_client_cert_source_for_mtls(): mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) -def test_bigtable_instance_admin_rest_lro_client(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - @pytest.mark.parametrize( "transport_name", [ @@ -12568,6 +26084,36 @@ def test_bigtable_instance_admin_client_transport_session_collision(transport_na session1 = client1.transport.list_hot_tablets._session session2 = client2.transport.list_hot_tablets._session assert session1 != session2 + session1 = client1.transport.create_logical_view._session + session2 = client2.transport.create_logical_view._session + assert session1 != session2 + session1 = client1.transport.get_logical_view._session + session2 = client2.transport.get_logical_view._session + assert session1 != session2 + session1 = client1.transport.list_logical_views._session + session2 = client2.transport.list_logical_views._session + assert session1 != session2 + session1 = client1.transport.update_logical_view._session + session2 = client2.transport.update_logical_view._session + assert session1 != session2 + session1 = client1.transport.delete_logical_view._session + session2 = client2.transport.delete_logical_view._session + assert session1 != session2 + session1 = client1.transport.create_materialized_view._session + session2 = client2.transport.create_materialized_view._session + assert session1 != session2 + session1 = client1.transport.get_materialized_view._session + session2 = client2.transport.get_materialized_view._session + assert session1 != session2 + session1 = client1.transport.list_materialized_views._session + session2 = client2.transport.list_materialized_views._session + assert session1 != session2 + session1 = client1.transport.update_materialized_view._session + session2 = client2.transport.update_materialized_view._session + assert session1 != session2 + session1 = client1.transport.delete_materialized_view._session + session2 = client2.transport.delete_materialized_view._session + assert session1 != session2 def test_bigtable_instance_admin_grpc_transport_channel(): @@ -12598,6 +26144,7 @@ def test_bigtable_instance_admin_grpc_asyncio_transport_channel(): # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.filterwarnings("ignore::FutureWarning") @pytest.mark.parametrize( "transport_class", [ @@ -12871,6 +26418,64 @@ def test_parse_instance_path(): assert expected == actual +def test_logical_view_path(): + project = "winkle" + instance = "nautilus" + logical_view = "scallop" + expected = ( + "projects/{project}/instances/{instance}/logicalViews/{logical_view}".format( + project=project, + instance=instance, + logical_view=logical_view, + ) + ) + actual = BigtableInstanceAdminClient.logical_view_path( + project, instance, logical_view + ) + assert expected == actual + + +def test_parse_logical_view_path(): + expected = { + "project": "abalone", + "instance": "squid", + "logical_view": "clam", + } + path = BigtableInstanceAdminClient.logical_view_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_logical_view_path(path) + assert expected == actual + + +def test_materialized_view_path(): + project = "whelk" + instance = "octopus" + materialized_view = "oyster" + expected = "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( + project=project, + instance=instance, + materialized_view=materialized_view, + ) + actual = BigtableInstanceAdminClient.materialized_view_path( + project, instance, materialized_view + ) + assert expected == actual + + +def test_parse_materialized_view_path(): + expected = { + "project": "nudibranch", + "instance": "cuttlefish", + "materialized_view": "mussel", + } + path = BigtableInstanceAdminClient.materialized_view_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_materialized_view_path(path) + assert expected == actual + + def test_table_path(): project = "winkle" instance = "nautilus" @@ -13023,36 +26628,41 @@ def test_client_with_default_client_info(): prep.assert_called_once_with(client_info) +def test_transport_close_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + with mock.patch.object( + type(getattr(client.transport, "_grpc_channel")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + @pytest.mark.asyncio -async def test_transport_close_async(): +async def test_transport_close_grpc_asyncio(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", + credentials=async_anonymous_credentials(), transport="grpc_asyncio" ) with mock.patch.object( - type(getattr(client.transport, "grpc_channel")), "close" + type(getattr(client.transport, "_grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport - ) - with mock.patch.object( - type(getattr(client.transport, close_name)), "close" - ) as close: - with client: - close.assert_not_called() - close.assert_called_once() +def test_transport_close_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + with mock.patch.object( + type(getattr(client.transport, "_session")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() def 
test_client_ctx(): @@ -13096,7 +26706,9 @@ def test_api_key_credentials(client_class, transport_class): patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index aa717a3cb..bff220693 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,11 +24,12 @@ import grpc from grpc.experimental import aio -from collections.abc import Iterable +from collections.abc import Iterable, AsyncIterable from google.protobuf import json_format import json import math import pytest +from google.api_core import api_core_version from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers from requests import Response @@ -36,6 +37,13 @@ from requests.sessions import Session from google.protobuf import json_format +try: + from google.auth.aio import credentials as ga_credentials_async + + HAS_GOOGLE_AUTH_AIO = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_AUTH_AIO = False + from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import future @@ -46,19 +54,21 @@ from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template +from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.cloud.bigtable_admin_v2.types import types from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore @@ -73,10 +83,32 @@ import google.auth +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + +async def mock_async_gen(data, chunk_size=1): + for i in range(0, len(data)): # pragma: NO COVER + chunk = data[i : i + chunk_size] + yield chunk.encode("utf-8") + + def client_cert_source_callback(): return b"cert bytes", b"key bytes" +# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded. 
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107. +def async_anonymous_credentials(): + if HAS_GOOGLE_AUTH_AIO: + return ga_credentials_async.AnonymousCredentials() + return ga_credentials.AnonymousCredentials() + + # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. @@ -88,6 +120,17 @@ def modify_default_endpoint(client): ) +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -95,38 +138,393 @@ def test__get_default_mtls_endpoint(): sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" - assert BigtableTableAdminClient._get_default_mtls_endpoint(None) is None + assert BaseBigtableTableAdminClient._get_default_mtls_endpoint(None) is None assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint ) assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi ) +def test__read_environment_variables(): + assert BaseBigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BaseBigtableTableAdminClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert BaseBigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with pytest.raises(ValueError) as excinfo: + BaseBigtableTableAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + else: + assert BaseBigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert BaseBigtableTableAdminClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert BaseBigtableTableAdminClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert BaseBigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + BaseBigtableTableAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert BaseBigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test_use_client_cert_effective(): + # Test case 1: Test when `should_use_client_cert` returns True. + # We mock the `should_use_client_cert` function to simulate a scenario where + # the google-auth library supports automatic mTLS and determines that a + # client certificate should be used. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch( + "google.auth.transport.mtls.should_use_client_cert", return_value=True + ): + assert BaseBigtableTableAdminClient._use_client_cert_effective() is True + + # Test case 2: Test when `should_use_client_cert` returns False. + # We mock the `should_use_client_cert` function to simulate a scenario where + # the google-auth library supports automatic mTLS and determines that a + # client certificate should NOT be used. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch( + "google.auth.transport.mtls.should_use_client_cert", return_value=False + ): + assert BaseBigtableTableAdminClient._use_client_cert_effective() is False + + # Test case 3: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BaseBigtableTableAdminClient._use_client_cert_effective() is True + + # Test case 4: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"} + ): + assert BaseBigtableTableAdminClient._use_client_cert_effective() is False + + # Test case 5: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}): + assert BaseBigtableTableAdminClient._use_client_cert_effective() is True + + # Test case 6: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False". 
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"}
+        ):
+            assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+    # Test case 7: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}):
+            assert BaseBigtableTableAdminClient._use_client_cert_effective() is True
+
+    # Test case 8: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"}
+        ):
+            assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+    # Test case 9: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set.
+    # In this case, the method should return False, which is the default value.
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(os.environ, clear=True):
+            assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+    # Test case 10: Test when `should_use_client_cert` is unavailable and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+    # The method should raise a ValueError as the environment variable must be either
+    # "true" or "false".
+    if not hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+        ):
+            with pytest.raises(ValueError):
+                BaseBigtableTableAdminClient._use_client_cert_effective()
+
+    # Test case 11: Test when `should_use_client_cert` is available and the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value.
+    # The method should return False as the environment variable is set to an invalid value.
+    if hasattr(google.auth.transport.mtls, "should_use_client_cert"):
+        with mock.patch.dict(
+            os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"}
+        ):
+            assert BaseBigtableTableAdminClient._use_client_cert_effective() is False
+
+    # Test case 12: Test when `should_use_client_cert` is available and both the
+    # `GOOGLE_API_USE_CLIENT_CERTIFICATE` and `GOOGLE_API_CERTIFICATE_CONFIG`
+    # environment variables are set to empty strings.
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}): + with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}): + assert ( + BaseBigtableTableAdminClient._use_client_cert_effective() is False + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BaseBigtableTableAdminClient._get_client_cert_source(None, False) is None + assert ( + BaseBigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + BaseBigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BaseBigtableTableAdminClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BaseBigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BaseBigtableTableAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BaseBigtableTableAdminClient), +) +@mock.patch.object( + BaseBigtableTableAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE + default_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BaseBigtableTableAdminClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BaseBigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BaseBigtableTableAdminClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + BaseBigtableTableAdminClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BaseBigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BaseBigtableTableAdminClient._get_api_endpoint( + None, None, mock_universe, "never" + ) + == mock_endpoint + ) + assert ( + BaseBigtableTableAdminClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BaseBigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." 
+ ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BaseBigtableTableAdminClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + BaseBigtableTableAdminClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BaseBigtableTableAdminClient._get_universe_domain(None, None) + == BaseBigtableTableAdminClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BaseBigtableTableAdminClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "error_code,cred_info_json,show_cred_info", + [ + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), + ], +) +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = BaseBigtableTableAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] + + +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = BaseBigtableTableAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + assert error.details == [] + + @pytest.mark.parametrize( "client_class,transport_name", [ - (BigtableTableAdminClient, "grpc"), - (BigtableTableAdminAsyncClient, "grpc_asyncio"), - (BigtableTableAdminClient, "rest"), + (BaseBigtableTableAdminClient, "grpc"), + (BaseBigtableTableAdminAsyncClient, "grpc_asyncio"), + (BaseBigtableTableAdminClient, "rest"), ], ) -def test_bigtable_table_admin_client_from_service_account_info( +def test_base_bigtable_table_admin_client_from_service_account_info( client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() @@ -154,7 +552,7 @@ def test_bigtable_table_admin_client_from_service_account_info( (transports.BigtableTableAdminRestTransport, "rest"), ], ) -def test_bigtable_table_admin_client_service_account_always_use_jwt( +def test_base_bigtable_table_admin_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( @@ -175,12 +573,12 @@ def test_bigtable_table_admin_client_service_account_always_use_jwt( @pytest.mark.parametrize( "client_class,transport_name", [ - (BigtableTableAdminClient, "grpc"), - (BigtableTableAdminAsyncClient, "grpc_asyncio"), - (BigtableTableAdminClient, "rest"), + (BaseBigtableTableAdminClient, "grpc"), + (BaseBigtableTableAdminAsyncClient, "grpc_asyncio"), + (BaseBigtableTableAdminClient, "rest"), ], ) -def test_bigtable_table_admin_client_from_service_account_file( +def test_base_bigtable_table_admin_client_from_service_account_file( client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() @@ 
-207,51 +605,59 @@ def test_bigtable_table_admin_client_from_service_account_file( ) -def test_bigtable_table_admin_client_get_transport_class(): - transport = BigtableTableAdminClient.get_transport_class() +def test_base_bigtable_table_admin_client_get_transport_class(): + transport = BaseBigtableTableAdminClient.get_transport_class() available_transports = [ transports.BigtableTableAdminGrpcTransport, transports.BigtableTableAdminRestTransport, ] assert transport in available_transports - transport = BigtableTableAdminClient.get_transport_class("grpc") + transport = BaseBigtableTableAdminClient.get_transport_class("grpc") assert transport == transports.BigtableTableAdminGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + ), + ( + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", ), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), + ( + BaseBigtableTableAdminClient, + transports.BigtableTableAdminRestTransport, + "rest", + ), ], ) @mock.patch.object( - BigtableTableAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminClient), + BaseBigtableTableAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BaseBigtableTableAdminClient), ) @mock.patch.object( - BigtableTableAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminAsyncClient), + BaseBigtableTableAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), ) -def test_bigtable_table_admin_client_client_options( +def test_base_bigtable_table_admin_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we won't create a new one. - with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: + with mock.patch.object(BaseBigtableTableAdminClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. - with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: + with mock.patch.object(BaseBigtableTableAdminClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() @@ -281,7 +687,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -311,15 +719,12 @@ def test_bigtable_table_admin_client_client_options( # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class(transport=transport_name) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
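
The test__get_universe_domain assertions earlier encode a three-level precedence that also feeds the _DEFAULT_ENDPOINT_TEMPLATE checks: an explicit client option wins, then the environment variable, then the library default, and an explicit empty string is rejected. A minimal sketch of that precedence; resolve_universe_domain and the "googleapis.com" default are illustrative stand-ins, not the client's actual implementation:

def resolve_universe_domain(client_universe, env_universe, default="googleapis.com"):
    # Explicit client option wins, then the env var, then the default.
    if client_universe == "":
        raise ValueError("Universe Domain cannot be an empty string.")
    if client_universe is not None:
        return client_universe
    return env_universe if env_universe is not None else default

assert resolve_universe_domain("foo.com", "bar.com") == "foo.com"
assert resolve_universe_domain(None, "bar.com") == "bar.com"
assert resolve_universe_domain(None, None) == "googleapis.com"
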
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -329,7 +734,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -347,7 +754,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -361,37 +770,37 @@ def test_bigtable_table_admin_client_client_options( "client_class,transport_class,transport_name,use_client_cert_env", [ ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", "true", ), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", "true", ), ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", "false", ), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", "true", ), ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", "false", @@ -399,17 +808,17 @@ def test_bigtable_table_admin_client_client_options( ], ) @mock.patch.object( - BigtableTableAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminClient), + BaseBigtableTableAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BaseBigtableTableAdminClient), ) @mock.patch.object( - BigtableTableAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminAsyncClient), + BaseBigtableTableAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_bigtable_table_admin_client_mtls_env_auto( +def test_base_bigtable_table_admin_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default @@ -429,7 +838,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -461,7 +872,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -495,7 +908,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -506,19 +921,21 @@ def test_bigtable_table_admin_client_mtls_env_auto( @pytest.mark.parametrize( - "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient] + "client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient] ) @mock.patch.object( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminClient), + modify_default_endpoint(BaseBigtableTableAdminClient), ) @mock.patch.object( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminAsyncClient), + modify_default_endpoint(BaseBigtableTableAdminAsyncClient), ) -def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_class): +def test_base_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source( + client_class, +): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". @@ -546,6 +963,119 @@ def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_cl assert api_endpoint == mock_api_endpoint assert cert_source is None + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported". + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset. + test_cases = [ + ( + # With workloads present in config, mTLS is enabled. + { + "version": 1, + "cert_configs": { + "workload": { + "cert_path": "path/to/cert/file", + "key_path": "path/to/key/file", + } + }, + }, + mock_client_cert_source, + ), + ( + # With workloads not present in config, mTLS is disabled. 
+ { + "version": 1, + "cert_configs": {}, + }, + None, + ), + ] + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + for config_data, expected_cert_source in test_cases: + env = os.environ.copy() + env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None) + with mock.patch.dict(os.environ, env, clear=True): + config_filename = "mock_certificate_config.json" + config_file_content = json.dumps(config_data) + m = mock.mock_open(read_data=config_file_content) + with mock.patch("builtins.open", m): + with mock.patch.dict( + os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename} + ): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is expected_cert_source + + # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset(empty). + test_cases = [ + ( + # With workloads present in config, mTLS is enabled. + { + "version": 1, + "cert_configs": { + "workload": { + "cert_path": "path/to/cert/file", + "key_path": "path/to/key/file", + } + }, + }, + mock_client_cert_source, + ), + ( + # With workloads not present in config, mTLS is disabled. + { + "version": 1, + "cert_configs": {}, + }, + None, + ), + ] + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + for config_data, expected_cert_source in test_cases: + env = os.environ.copy() + env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") + with mock.patch.dict(os.environ, env, clear=True): + config_filename = "mock_certificate_config.json" + config_file_content = json.dumps(config_data) + m = mock.mock_open(read_data=config_file_content) + with mock.patch("builtins.open", m): + with mock.patch.dict( + os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename} + ): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is expected_cert_source + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() @@ -585,20 +1115,125 @@ def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_cl assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + +@pytest.mark.parametrize( + "client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient] +) +@mock.patch.object( + BaseBigtableTableAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BaseBigtableTableAdminClient), +) +@mock.patch.object( + BaseBigtableTableAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), +) +def test_base_bigtable_table_admin_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE + default_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
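
GOOGLE_API_USE_MTLS_ENDPOINT accepts only never, auto, and always; any other value raises the MutualTLSChannelError asserted above. A sketch of the selection rule those checks pin down, with a plain ValueError standing in for the real exception type:

def select_endpoint(use_mtls_env, default_endpoint, mtls_endpoint, have_client_cert):
    if use_mtls_env not in ("never", "auto", "always"):
        # The real clients raise MutualTLSChannelError with this message.
        raise ValueError(
            "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be "
            "`never`, `auto` or `always`"
        )
    if use_mtls_env == "always" or (use_mtls_env == "auto" and have_client_cert):
        return mtls_endpoint
    return default_endpoint
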
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + ), + ( + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", ), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), + ( + BaseBigtableTableAdminClient, + transports.BigtableTableAdminRestTransport, + "rest", + ), ], ) -def test_bigtable_table_admin_client_client_options_scopes( +def test_base_bigtable_table_admin_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. @@ -611,7 +1246,9 @@ def test_bigtable_table_admin_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -625,26 +1262,26 @@ def test_bigtable_table_admin_client_client_options_scopes( "client_class,transport_class,transport_name,grpc_helpers", [ ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", grpc_helpers, ), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", None, ), ], ) -def test_bigtable_table_admin_client_client_options_credentials_file( +def test_base_bigtable_table_admin_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. 
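
The hasattr(options, "universe_domain") branch above lets the same test pass against google-api-core releases that predate universe-domain support. The feature-detection pattern in isolation, using the same "bar.com" placeholder universe as the test:

from google.api_core import client_options

options = client_options.ClientOptions()
# Only pass universe_domain when this api-core version knows the attribute;
# older versions reject unknown keyword arguments.
if hasattr(options, "universe_domain"):
    options = client_options.ClientOptions(universe_domain="bar.com")
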
@@ -656,7 +1293,9 @@ def test_bigtable_table_admin_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -666,12 +1305,12 @@ def test_bigtable_table_admin_client_client_options_credentials_file( ) -def test_bigtable_table_admin_client_client_options_from_dict(): +def test_base_bigtable_table_admin_client_client_options_from_dict(): with mock.patch( "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( client_options={"api_endpoint": "squid.clam.whelk"} ) grpc_transport.assert_called_once_with( @@ -691,20 +1330,20 @@ def test_bigtable_table_admin_client_client_options_from_dict(): "client_class,transport_class,transport_name,grpc_helpers", [ ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", grpc_helpers, ), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) -def test_bigtable_table_admin_client_create_channel_credentials_file( +def test_base_bigtable_table_admin_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. @@ -716,7 +1355,9 @@ def test_bigtable_table_admin_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -769,7 +1410,7 @@ def test_bigtable_table_admin_client_create_channel_credentials_file( ], ) def test_create_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -791,7 +1432,8 @@ def test_create_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() + request = bigtable_table_admin.CreateTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, gba_table.Table) @@ -800,33 +1442,124 @@ def test_create_table(request_type, transport: str = "grpc"): assert response.deletion_protection is True -def test_create_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_create_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
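
Per AIP-4235, request fields annotated for auto-population receive a UUID4 when the caller leaves them unset, which is why the non_empty_request tests fill in every other string field explicitly. A minimal illustration of the behavior; ensure_request_id and the field name are hypothetical, not this API's actual request shape:

import uuid

def ensure_request_id(request_id=None):
    # Generate a UUID4 only when the caller did not supply a value.
    return request_id or str(uuid.uuid4())

assert ensure_request_id("abc") == "abc"
assert len(ensure_request_id()) == 36  # canonical UUID string length
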
+ client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_table), "__call__") as call: - client.create_table() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_table(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() + assert args[0] == bigtable_table_admin.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) -@pytest.mark.asyncio -async def test_create_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CreateTableRequest, -): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +def test_create_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Everything is optional in proto3 as far as the runtime is concerned, + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_table] = mock_rpc + request = {} + client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_table + ] = mock_rpc + + request = {} + await client.create_table(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + await client.create_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateTableRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() @@ -845,7 +1578,8 @@ async def test_create_table_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() + request = bigtable_table_admin.CreateTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, gba_table.Table) @@ -860,7 +1594,7 @@ async def test_create_table_async_from_dict(): def test_create_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -890,8 +1624,8 @@ def test_create_table_field_headers(): @pytest.mark.asyncio async def test_create_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -919,7 +1653,7 @@ async def test_create_table_field_headers_async(): def test_create_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -951,7 +1685,7 @@ def test_create_table_flattened(): def test_create_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -968,8 +1702,8 @@ def test_create_table_flattened_error(): @pytest.mark.asyncio async def test_create_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1003,8 +1737,8 @@ async def test_create_table_flattened_async(): @pytest.mark.asyncio async def test_create_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1026,7 +1760,7 @@ async def test_create_table_flattened_error_async(): ], ) def test_create_table_from_snapshot(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1046,28 +1780,137 @@ def test_create_table_from_snapshot(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. 
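
The use_cached_wrapped_rpc tests assert that wrapping, which layers retry and timeout defaults onto each RPC, happens once at transport construction rather than on every call. A toy version of that cache; TinyTransport is illustrative only:

class TinyTransport:
    def __init__(self, rpcs):
        # Wrap every RPC exactly once, keyed by the raw callable; later
        # calls look up and reuse the cached wrapper.
        self._wrapped_methods = {rpc: self._wrap(rpc) for rpc in rpcs}

    @staticmethod
    def _wrap(rpc):
        def wrapped(request, **kwargs):
            # Real wrappers add retries, timeouts and metadata here.
            return rpc(request, **kwargs)
        return wrapped
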
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_create_table_from_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_create_table_from_snapshot_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_table_from_snapshot), "__call__" ) as call: - client.create_table_from_snapshot() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_table_from_snapshot(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + +def test_create_table_from_snapshot_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_table_from_snapshot + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_table_from_snapshot + ] = mock_rpc + request = {} + client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_table_from_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_table_from_snapshot + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_table_from_snapshot + ] = mock_rpc + + request = {} + await client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_table_from_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1075,8 +1918,8 @@ async def test_create_table_from_snapshot_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1097,7 +1940,8 @@ async def test_create_table_from_snapshot_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
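
The async variants of these tests swap mock.Mock for mock.AsyncMock so that the patched RPC is awaitable. A self-contained illustration:

import asyncio
from unittest import mock

async def demo():
    rpc = mock.AsyncMock(return_value=mock.Mock())
    await rpc({})  # awaiting the AsyncMock behaves like a real coroutine call
    await rpc({})
    assert rpc.call_count == 2

asyncio.run(demo())
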
assert isinstance(response, future.Future) @@ -1109,7 +1953,7 @@ async def test_create_table_from_snapshot_async_from_dict(): def test_create_table_from_snapshot_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1141,8 +1985,8 @@ def test_create_table_from_snapshot_field_headers(): @pytest.mark.asyncio async def test_create_table_from_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1174,7 +2018,7 @@ async def test_create_table_from_snapshot_field_headers_async(): def test_create_table_from_snapshot_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1208,7 +2052,7 @@ def test_create_table_from_snapshot_flattened(): def test_create_table_from_snapshot_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1225,8 +2069,8 @@ def test_create_table_from_snapshot_flattened_error(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1264,8 +2108,8 @@ async def test_create_table_from_snapshot_flattened_async(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1287,7 +2131,7 @@ async def test_create_table_from_snapshot_flattened_error_async(): ], ) def test_list_tables(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1307,35 +2151,127 @@ def test_list_tables(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() + request = bigtable_table_admin.ListTablesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListTablesPager) assert response.next_page_token == "next_page_token_value" -def test_list_tables_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_list_tables_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListTablesRequest( + parent="parent_value", + page_token="page_token_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - client.list_tables() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_tables(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() + assert args[0] == bigtable_table_admin.ListTablesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_tables_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_tables in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc + request = {} + client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_tables(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_tables_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_tables + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_tables + ] = mock_rpc + + request = {} + await client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. 
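
Throughout these tests the gRPC stub is patched with mock.patch.object(type(client.transport.list_tables), "__call__") rather than on the instance, because Python resolves special methods on the type; an instance-level patch would never be invoked. The same pattern on a toy callable:

from unittest import mock

class Stub:
    def __call__(self, request):
        return "real"

stub = Stub()
with mock.patch.object(type(stub), "__call__") as call:
    call.return_value = "fake"
    assert stub("request") == "fake"  # special-method lookup goes through the type
    call.assert_called_once()
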
+ assert mock_rpc.call_count == 1 + + await client.list_tables(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio async def test_list_tables_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1356,7 +2292,8 @@ async def test_list_tables_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() + request = bigtable_table_admin.ListTablesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListTablesAsyncPager) @@ -1369,7 +2306,7 @@ async def test_list_tables_async_from_dict(): def test_list_tables_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1399,8 +2336,8 @@ def test_list_tables_field_headers(): @pytest.mark.asyncio async def test_list_tables_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1430,7 +2367,7 @@ async def test_list_tables_field_headers_async(): def test_list_tables_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1454,7 +2391,7 @@ def test_list_tables_flattened(): def test_list_tables_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1469,8 +2406,8 @@ def test_list_tables_flattened_error(): @pytest.mark.asyncio async def test_list_tables_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
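
The pager assertions above check that the retry, timeout, and routing metadata supplied to the initial list_tables call are stored on the pager, so that subsequent page fetches can reuse them. The expected metadata is built with api-core's routing-header helper; for the empty request the parent routing parameter is simply empty:

from google.api_core import gapic_v1
from google.api_core import retry as retries

retry = retries.Retry()
timeout = 5
expected_metadata = (
    gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
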
@@ -1498,8 +2435,8 @@ async def test_list_tables_flattened_async(): @pytest.mark.asyncio async def test_list_tables_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1512,8 +2449,8 @@ async def test_list_tables_flattened_error_async(): def test_list_tables_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -1548,13 +2485,17 @@ def test_list_tables_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_tables(request={}) + pager = client.list_tables(request={}, retry=retry, timeout=timeout) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -1562,8 +2503,8 @@ def test_list_tables_pager(transport_name: str = "grpc"): def test_list_tables_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -1604,8 +2545,8 @@ def test_list_tables_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_tables_async_pager(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1654,8 +2595,8 @@ async def test_list_tables_async_pager(): @pytest.mark.asyncio async def test_list_tables_async_pages(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1709,7 +2650,7 @@ async def test_list_tables_async_pages(): ], ) def test_get_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1731,7 +2672,8 @@ def test_get_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() + request = bigtable_table_admin.GetTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, table.Table) @@ -1740,28 +2682,115 @@ def test_get_table(request_type, transport: str = "grpc"): assert response.deletion_protection is True -def test_get_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminClient( +def test_get_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetTableRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_table), "__call__") as call: - client.get_table() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_table(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() + assert args[0] == bigtable_table_admin.GetTableRequest( + name="name_value", + ) + + +def test_get_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_table] = mock_rpc + request = {} + client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_table + ] = mock_rpc + + request = {} + await client.get_table(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + await client.get_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio async def test_get_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1784,7 +2813,8 @@ async def test_get_table_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() + request = bigtable_table_admin.GetTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, table.Table) @@ -1799,7 +2829,7 @@ async def test_get_table_async_from_dict(): def test_get_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1829,8 +2859,8 @@ def test_get_table_field_headers(): @pytest.mark.asyncio async def test_get_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1858,7 +2888,7 @@ async def test_get_table_field_headers_async(): def test_get_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1882,7 +2912,7 @@ def test_get_table_flattened(): def test_get_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1897,8 +2927,8 @@ def test_get_table_flattened_error(): @pytest.mark.asyncio async def test_get_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1924,8 +2954,8 @@ async def test_get_table_flattened_async(): @pytest.mark.asyncio async def test_get_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1945,7 +2975,7 @@ async def test_get_table_flattened_error_async(): ], ) def test_update_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1963,35 +2993,131 @@ def test_update_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() + request = bigtable_table_admin.UpdateTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
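
The flattened_error tests pin down that the two calling conventions are mutually exclusive: a method accepts either a request object or flattened keyword fields, never both. Mirroring the test below, and noting that AnonymousCredentials suffice because the ValueError is raised before any RPC is attempted:

client = BaseBigtableTableAdminClient(
    credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
    client.get_table(
        bigtable_table_admin.GetTableRequest(),
        name="name_value",
    )
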
assert isinstance(response, future.Future) -def test_update_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_update_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UpdateTableRequest() + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_table), "__call__") as call: - client.update_table() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_table(request=request) call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.UpdateTableRequest() +def test_update_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_table] = mock_rpc + request = {} + client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_table + ] = mock_rpc + + request = {} + await client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + @pytest.mark.asyncio async def test_update_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.UpdateTableRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2010,7 +3136,8 @@ async def test_update_table_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() + request = bigtable_table_admin.UpdateTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, future.Future) @@ -2022,7 +3149,7 @@ async def test_update_table_async_from_dict(): def test_update_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2052,8 +3179,8 @@ def test_update_table_field_headers(): @pytest.mark.asyncio async def test_update_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2083,7 +3210,7 @@ async def test_update_table_field_headers_async(): def test_update_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2111,7 +3238,7 @@ def test_update_table_flattened(): def test_update_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2127,8 +3254,8 @@ def test_update_table_flattened_error(): @pytest.mark.asyncio async def test_update_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2160,8 +3287,8 @@ async def test_update_table_flattened_async(): @pytest.mark.asyncio async def test_update_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2182,7 +3309,7 @@ async def test_update_table_flattened_error_async(): ], ) def test_delete_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2200,52 +3327,143 @@ def test_delete_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() + request = bigtable_table_admin.DeleteTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None -def test_delete_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_delete_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteTableRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. 
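# Hedged sketch of what the field-header tests above assert: request fields
# that appear in the HTTP/1.1 URI are mirrored into gRPC metadata under
# x-goog-request-params. The helper below is the api_core utility these
# generated tests themselves use (see the pager test further down).
from google.api_core.gapic_v1 import routing_header

metadata = routing_header.to_grpc_metadata((("name", "name_value"),))
assert metadata == ("x-goog-request-params", "name=name_value")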
with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - client.delete_table() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_table(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() + assert args[0] == bigtable_table_admin.DeleteTableRequest( + name="name_value", + ) -@pytest.mark.asyncio -async def test_delete_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteTableRequest, -): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +def test_delete_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_table(request) + # Ensure method has been cached + assert client._transport.delete_table in client._transport._wrapped_methods - # Establish that the underlying gRPC stub method was called. + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc + request = {} + client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_table + ] = mock_rpc + + request = {} + await client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. 
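# Hedged sketch of the stub-patching idiom used throughout these tests:
# transport method handles are callable objects, so patching __call__ on
# their *type* intercepts invocation while leaving the handle itself in
# place. _Stub is an illustrative stand-in for a gRPC multicallable.
from unittest import mock


class _Stub:
    def __call__(self, request):
        raise RuntimeError("would hit the network")


stub = _Stub()
with mock.patch.object(type(stub), "__call__") as call:
    call.return_value = None
    assert stub({}) is None  # intercepted; nothing leaves the process
    call.assert_called_once()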
+ assert mock_rpc.call_count == 1 + + await client.delete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteTableRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() + request = bigtable_table_admin.DeleteTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -2257,7 +3475,7 @@ async def test_delete_table_async_from_dict(): def test_delete_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2287,8 +3505,8 @@ def test_delete_table_field_headers(): @pytest.mark.asyncio async def test_delete_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2316,7 +3534,7 @@ async def test_delete_table_field_headers_async(): def test_delete_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2340,7 +3558,7 @@ def test_delete_table_flattened(): def test_delete_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2355,8 +3573,8 @@ def test_delete_table_flattened_error(): @pytest.mark.asyncio async def test_delete_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
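# Hedged sketch of why the async test above can await a plain canned value:
# grpc_helpers_async.FakeUnaryUnaryCall wraps a response in an awaitable
# call object, mimicking a unary-unary gRPC call. The behavior shown here
# is an assumption based on how these tests use it.
import asyncio
from google.api_core import grpc_helpers_async


async def main():
    call = grpc_helpers_async.FakeUnaryUnaryCall(None)
    assert await call is None  # awaiting yields the canned response


asyncio.run(main())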
@@ -2382,8 +3600,8 @@ async def test_delete_table_flattened_async(): @pytest.mark.asyncio async def test_delete_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2403,7 +3621,7 @@ async def test_delete_table_flattened_error_async(): ], ) def test_undelete_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2421,26 +3639,126 @@ def test_undelete_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() + request = bigtable_table_admin.UndeleteTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_undelete_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_undelete_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UndeleteTableRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - client.undelete_table() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.undelete_table(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() + assert args[0] == bigtable_table_admin.UndeleteTableRequest( + name="name_value", + ) + + +def test_undelete_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.undelete_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc + request = {} + client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.undelete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_undelete_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.undelete_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.undelete_table + ] = mock_rpc + + request = {} + await client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.undelete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2448,8 +3766,8 @@ async def test_undelete_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.UndeleteTableRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2468,7 +3786,8 @@ async def test_undelete_table_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() + request = bigtable_table_admin.UndeleteTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
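# Hedged sketch of the lazily built operations client the comments above
# refer to: the first LRO method call constructs it, and later calls reuse
# the cached instance, which is why wrapper_fn is not invoked again.
# DemoTransport is an illustrative stand-in, not the real transport.
class DemoTransport:
    def __init__(self):
        self._operations_client = None
        self.builds = 0

    @property
    def operations_client(self):
        if self._operations_client is None:
            self.builds += 1
            self._operations_client = object()  # stand-in for the real client
        return self._operations_client


t = DemoTransport()
t.operations_client
t.operations_client
assert t.builds == 1  # built on first use, cached thereafter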
assert isinstance(response, future.Future) @@ -2480,7 +3799,7 @@ async def test_undelete_table_async_from_dict(): def test_undelete_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2510,8 +3829,8 @@ def test_undelete_table_field_headers(): @pytest.mark.asyncio async def test_undelete_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2541,7 +3860,7 @@ async def test_undelete_table_field_headers_async(): def test_undelete_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2565,7 +3884,7 @@ def test_undelete_table_flattened(): def test_undelete_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2580,8 +3899,8 @@ def test_undelete_table_flattened_error(): @pytest.mark.asyncio async def test_undelete_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2609,8 +3928,8 @@ async def test_undelete_table_flattened_async(): @pytest.mark.asyncio async def test_undelete_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2625,12 +3944,12 @@ async def test_undelete_table_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ModifyColumnFamiliesRequest, + bigtable_table_admin.CreateAuthorizedViewRequest, dict, ], ) -def test_modify_column_families(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_create_authorized_view(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2641,53 +3960,153 @@ def test_modify_column_families(request_type, transport: str = "grpc"): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - response = client.modify_column_families(request) + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + assert isinstance(response, future.Future) -def test_modify_column_families_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_create_authorized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: - client.modify_column_families() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_authorized_view(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + assert args[0] == bigtable_table_admin.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + +def test_create_authorized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_authorized_view + ] = mock_rpc + request = {} + client.create_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
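# Hedged sketch of the AIP-4235 behavior the *_non_empty_request_* tests
# guard: string fields annotated as UUID4 request IDs are filled with a
# fresh uuid4 when left empty, while explicitly set fields pass through
# untouched. auto_populate is a loose illustration, not the client's code.
import uuid


def auto_populate(request: dict, uuid4_fields: set) -> dict:
    for field in uuid4_fields:
        if not request.get(field):
            request[field] = str(uuid.uuid4())
    return request


req = auto_populate({"parent": "parent_value", "request_id": ""}, {"request_id"})
assert req["parent"] == "parent_value"  # explicit fields are preserved
assert len(req["request_id"]) == 36  # empty UUID4 field auto-populated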
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_modify_column_families_async( +async def test_create_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_authorized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_authorized_view + ] = mock_rpc + + request = {} + await client.create_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_authorized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2697,52 +4116,46 @@ async def test_modify_column_families_async( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) + operations_pb2.Operation(name="operations/spam") ) - response = await client.modify_column_families(request) + response = await client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + assert isinstance(response, future.Future) @pytest.mark.asyncio -async def test_modify_column_families_async_from_dict(): - await test_modify_column_families_async(request_type=dict) +async def test_create_authorized_view_async_from_dict(): + await test_create_authorized_view_async(request_type=dict) -def test_modify_column_families_field_headers(): - client = BigtableTableAdminClient( +def test_create_authorized_view_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: - call.return_value = table.Table() - client.modify_column_families(request) + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -2753,28 +4166,30 @@ def test_modify_column_families_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_modify_column_families_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_create_authorized_view_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - await client.modify_column_families(request) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -2785,133 +4200,125 @@ async def test_modify_column_families_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] -def test_modify_column_families_flattened(): - client = BigtableTableAdminClient( +def test_create_authorized_view_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Table() + call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.modify_column_families( - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + client.create_authorized_view( + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" + arg = args[0].parent + mock_val = "parent_value" assert arg == mock_val - arg = args[0].modifications - mock_val = [ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") - ] + arg = args[0].authorized_view + mock_val = table.AuthorizedView(name="name_value") + assert arg == mock_val + arg = args[0].authorized_view_id + mock_val = "authorized_view_id_value" assert arg == mock_val -def test_modify_column_families_flattened_error(): - client = BigtableTableAdminClient( +def test_create_authorized_view_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + client.create_authorized_view( + bigtable_table_admin.CreateAuthorizedViewRequest(), + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) @pytest.mark.asyncio -async def test_modify_column_families_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_create_authorized_view_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Table() + call.return_value = operations_pb2.Operation(name="operations/op") - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
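# Hedged sketch of the flattened-call contract the surrounding tests
# assert: a caller may pass either a request object or flattened keyword
# fields, but never both. The names below are illustrative stand-ins.
class DemoRequest:
    def __init__(self, parent=""):
        self.parent = parent


def create_authorized_view(request=None, *, parent=None):
    if request is not None and parent is not None:
        raise ValueError("Cannot pass both a request object and flattened fields")
    return request if request is not None else DemoRequest(parent=parent)


assert create_authorized_view(parent="parent_value").parent == "parent_value"
try:
    create_authorized_view(DemoRequest(), parent="parent_value")
    raise AssertionError("expected ValueError")
except ValueError:
    pass  # both styles at once is rejected, as the *_flattened_error tests check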
- response = await client.modify_column_families( - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + response = await client.create_authorized_view( + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" + arg = args[0].parent + mock_val = "parent_value" assert arg == mock_val - arg = args[0].modifications - mock_val = [ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") - ] + arg = args[0].authorized_view + mock_val = table.AuthorizedView(name="name_value") + assert arg == mock_val + arg = args[0].authorized_view_id + mock_val = "authorized_view_id_value" assert arg == mock_val @pytest.mark.asyncio -async def test_modify_column_families_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_create_authorized_view_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + await client.create_authorized_view( + bigtable_table_admin.CreateAuthorizedViewRequest(), + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DropRowRangeRequest, + bigtable_table_admin.ListAuthorizedViewsRequest, dict, ], ) -def test_drop_row_range(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_list_authorized_views(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2921,191 +4328,147 @@ def test_drop_row_range(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = None - response = client.drop_row_range(request) + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert response is None + assert isinstance(response, pagers.ListAuthorizedViewsPager) + assert response.next_page_token == "next_page_token_value" -def test_drop_row_range_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_list_authorized_views_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - client.drop_row_range() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() - - -@pytest.mark.asyncio -async def test_drop_row_range_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DropRowRangeRequest, -): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListAuthorizedViewsRequest( + parent="parent_value", + page_token="page_token_value", ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_authorized_views(request=request) + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_drop_row_range_async_from_dict(): - await test_drop_row_range_async(request_type=dict) + assert args[0] == bigtable_table_admin.ListAuthorizedViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) -def test_drop_row_range_field_headers(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) +def test_list_authorized_views_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.DropRowRangeRequest() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - request.name = "name_value" + # Ensure method has been cached + assert ( + client._transport.list_authorized_views + in client._transport._wrapped_methods + ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value = None - client.drop_row_range(request) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_authorized_views + ] = mock_rpc + request = {} + client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_drop_row_range_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DropRowRangeRequest() - - request.name = "name_value" - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.drop_row_range(request) + assert mock_rpc.call_count == 1 - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request + client.list_authorized_views(request) - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GenerateConsistencyTokenRequest, - dict, - ], -) -def test_generate_consistency_token(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +@pytest.mark.asyncio +async def test_list_authorized_views_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", + # Ensure method has been cached + assert ( + client._client._transport.list_authorized_views + in client._client._transport._wrapped_methods ) - response = client.generate_consistency_token(request) - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_authorized_views + ] = mock_rpc - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" + request = {} + await client.list_authorized_views(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -def test_generate_consistency_token_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + await client.list_authorized_views(request) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - client.generate_consistency_token() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_generate_consistency_token_async( +async def test_list_authorized_views_async( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3115,48 +4478,49 @@ async def test_generate_consistency_token_async( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", + bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", ) ) - response = await client.generate_consistency_token(request) + response = await client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" + assert isinstance(response, pagers.ListAuthorizedViewsAsyncPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio -async def test_generate_consistency_token_async_from_dict(): - await test_generate_consistency_token_async(request_type=dict) +async def test_list_authorized_views_async_from_dict(): + await test_list_authorized_views_async(request_type=dict) -def test_generate_consistency_token_field_headers(): - client = BigtableTableAdminClient( +def test_list_authorized_views_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - client.generate_consistency_token(request) + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -3167,30 +4531,30 @@ def test_generate_consistency_token_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_generate_consistency_token_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_list_authorized_views_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse() + bigtable_table_admin.ListAuthorizedViewsResponse() ) - await client.generate_consistency_token(request) + await client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) @@ -3201,158 +4565,458 @@ async def test_generate_consistency_token_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] -def test_generate_consistency_token_flattened(): - client = BigtableTableAdminClient( +def test_list_authorized_views_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.generate_consistency_token( - name="name_value", + client.list_authorized_views( + parent="parent_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" + arg = args[0].parent + mock_val = "parent_value" assert arg == mock_val -def test_generate_consistency_token_flattened_error(): - client = BigtableTableAdminClient( +def test_list_authorized_views_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name="name_value", + client.list_authorized_views( + bigtable_table_admin.ListAuthorizedViewsRequest(), + parent="parent_value", ) @pytest.mark.asyncio -async def test_generate_consistency_token_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_list_authorized_views_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse() + bigtable_table_admin.ListAuthorizedViewsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.generate_consistency_token( - name="name_value", + response = await client.list_authorized_views( + parent="parent_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" + arg = args[0].parent + mock_val = "parent_value" assert arg == mock_val @pytest.mark.asyncio -async def test_generate_consistency_token_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_list_authorized_views_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name="name_value", + await client.list_authorized_views( + bigtable_table_admin.ListAuthorizedViewsRequest(), + parent="parent_value", ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CheckConsistencyRequest, - dict, - ], -) -def test_check_consistency(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_list_authorized_views_pager(transport_name: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport=transport_name, ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + RuntimeError, ) - response = client.check_consistency(request) - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_authorized_views(request={}, retry=retry, timeout=timeout) - # Establish that the response is the type that we expect. 
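# Hedged sketch of the pager behavior the test above sets up: the pager
# walks the next_page_token chain and flattens per-page items into a single
# iterator, which is why 3 + 0 + 1 + 2 pages of views yield 6 results.
# DemoPage/demo_pager are illustrative, not the generated pagers module.
class DemoPage:
    def __init__(self, items, token):
        self.items, self.next_page_token = items, token


def demo_pager(pages):
    for page in pages:
        yield from page.items  # items from every page, in order


pages = [
    DemoPage([1, 2, 3], "abc"),
    DemoPage([], "def"),
    DemoPage([4], "ghi"),
    DemoPage([5, 6], ""),
]
assert list(demo_pager(pages)) == [1, 2, 3, 4, 5, 6]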
- assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.AuthorizedView) for i in results) + + +def test_list_authorized_views_pages(transport_name: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + RuntimeError, + ) + pages = list(client.list_authorized_views(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_authorized_views_async_pager(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_authorized_views( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table.AuthorizedView) for i in responses) + + +@pytest.mark.asyncio +async def test_list_authorized_views_async_pages(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
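# Hedged sketch of the async pager loop used above: items come out of an
# `async for`, matching the `async for response in async_pager` pattern in
# the test. demo_async_pager is illustrative, not the generated AsyncPager.
import asyncio


async def demo_async_pager(pages):
    for page in pages:
        for item in page:
            yield item


async def main():
    results = [i async for i in demo_async_pager([[1, 2, 3], [], [4], [5, 6]])]
    assert len(results) == 6  # same count the async pager test expects


asyncio.run(main())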
+ call.side_effect = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_authorized_views(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetAuthorizedViewRequest, + dict, + ], +) +def test_get_authorized_view(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, + ) + response = client.get_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.GetAuthorizedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, table.AuthorizedView) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True -def test_check_consistency_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_get_authorized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetAuthorizedViewRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: - client.check_consistency() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_authorized_view(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + assert args[0] == bigtable_table_admin.GetAuthorizedViewRequest( + name="name_value", + ) + + +def test_get_authorized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_authorized_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_authorized_view + ] = mock_rpc + request = {} + client.get_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_check_consistency_async( +async def test_get_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CheckConsistencyRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_authorized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_authorized_view + ] = mock_rpc + + request = {} + await client.get_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_authorized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GetAuthorizedViewRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3362,48 +5026,53 @@ async def test_check_consistency_async( # Mock the actual call within the gRPC stub, and fake the request. 
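The `use_cached_wrapped_rpc` failsafes above pin down that `wrap_method` runs during client construction (via `_prep_wrapped_messages`) and that subsequent calls dispatch through `_transport._wrapped_methods` without re-wrapping. A sketch of that caching scheme; `SketchTransport` and `stubs` are illustrative, not the generated transport:

```python
# Sketch: wrap each RPC exactly once at construction time, then dispatch
# through the cache on every call. `wrap_method` stands in for
# google.api_core.gapic_v1.method.wrap_method; `stubs` is a hypothetical
# name -> callable mapping.
class SketchTransport:
    def __init__(self, stubs, wrap_method):
        # Analogous to _prep_wrapped_messages: wrapper_fn.call_count > 0
        # immediately after client creation.
        self._wrapped_methods = {
            name: wrap_method(fn) for name, fn in stubs.items()
        }

    def call(self, name, request):
        # No re-wrapping here, so wrapper_fn.call_count stays 0 across calls.
        return self._wrapped_methods[name](request)
```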
with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse( - consistent=True, + table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, ) ) - response = await client.check_consistency(request) + response = await client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True + assert isinstance(response, table.AuthorizedView) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True @pytest.mark.asyncio -async def test_check_consistency_async_from_dict(): - await test_check_consistency_async(request_type=dict) +async def test_get_authorized_view_async_from_dict(): + await test_get_authorized_view_async(request_type=dict) -def test_check_consistency_field_headers(): - client = BigtableTableAdminClient( +def test_get_authorized_view_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - client.check_consistency(request) + call.return_value = table.AuthorizedView() + client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -3419,25 +5088,25 @@ def test_check_consistency_field_headers(): @pytest.mark.asyncio -async def test_check_consistency_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_authorized_view_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse() + table.AuthorizedView() ) - await client.check_consistency(request) + await client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
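The `field_headers` tests above check that URI-bound request fields are mirrored into a single `x-goog-request-params` metadata entry. The helper used to build the expected value is real and can be exercised directly:

```python
from google.api_core import gapic_v1

# Build the routing metadata entry the assertions look for in kw["metadata"];
# URI-bound fields are mirrored into x-goog-request-params so the backend
# can route the request.
metadata = gapic_v1.routing_header.to_grpc_metadata((("name", "name_value"),))
print(metadata)  # expected: ("x-goog-request-params", "name=name_value")
```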
assert len(call.mock_calls) @@ -3452,22 +5121,21 @@ async def test_check_consistency_field_headers_async(): ) in kw["metadata"] -def test_check_consistency_flattened(): - client = BigtableTableAdminClient( +def test_get_authorized_view_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse() + call.return_value = table.AuthorizedView() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.check_consistency( + client.get_authorized_view( name="name_value", - consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -3477,47 +5145,42 @@ def test_check_consistency_flattened(): arg = args[0].name mock_val = "name_value" assert arg == mock_val - arg = args[0].consistency_token - mock_val = "consistency_token_value" - assert arg == mock_val -def test_check_consistency_flattened_error(): - client = BigtableTableAdminClient( +def test_get_authorized_view_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), + client.get_authorized_view( + bigtable_table_admin.GetAuthorizedViewRequest(), name="name_value", - consistency_token="consistency_token_value", ) @pytest.mark.asyncio -async def test_check_consistency_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_authorized_view_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse() + call.return_value = table.AuthorizedView() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse() + table.AuthorizedView() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
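The `flattened_error` cases above encode a GAPIC-wide rule: flattened keyword arguments are sugar for constructing the request, so passing both an explicit request object and flattened fields is ambiguous and raises `ValueError`. A minimal sketch of the check, with a simplified signature and a dict request:

```python
# Sketch of the mutual-exclusion rule behind the *_flattened_error tests.
def get_authorized_view(request=None, *, name=None):
    if request is not None and name is not None:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    return request if request is not None else {"name": name}
```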
- response = await client.check_consistency( + response = await client.get_authorized_view( name="name_value", - consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -3527,36 +5190,32 @@ async def test_check_consistency_flattened_async(): arg = args[0].name mock_val = "name_value" assert arg == mock_val - arg = args[0].consistency_token - mock_val = "consistency_token_value" - assert arg == mock_val @pytest.mark.asyncio -async def test_check_consistency_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_authorized_view_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), + await client.get_authorized_view( + bigtable_table_admin.GetAuthorizedViewRequest(), name="name_value", - consistency_token="consistency_token_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.SnapshotTableRequest, + bigtable_table_admin.UpdateAuthorizedViewRequest, dict, ], ) -def test_snapshot_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_update_authorized_view(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3566,87 +5225,197 @@ def test_snapshot_table(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.snapshot_table(request) + response = client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_snapshot_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_update_authorized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UpdateAuthorizedViewRequest() + # Mock the actual call within the gRPC stub, and fake the request. 
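The `non_empty_request_with_auto_populated_field` failsafes refer to AIP-4235, under which clients auto-fill empty UUID4 request fields (typically a `request_id`). The admin requests shown here declare no such field, so the tests only verify that explicitly set strings pass through unchanged. A hedged sketch of the auto-population rule itself, with `request_id` as an illustrative field name:

```python
import uuid

# Sketch of AIP-4235 auto-population: fill declared UUID4 fields only when
# the caller leaves them empty; never overwrite a caller-supplied value.
def auto_populate(request: dict, auto_fields=("request_id",)) -> dict:
    for field in auto_fields:
        if not request.get(field):
            request[field] = str(uuid.uuid4())
    return request
```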
- with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - client.snapshot_table() + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_authorized_view(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() + assert args[0] == bigtable_table_admin.UpdateAuthorizedViewRequest() -@pytest.mark.asyncio -async def test_snapshot_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.SnapshotTableRequest, -): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +def test_update_authorized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Ensure method has been cached + assert ( + client._transport.update_authorized_view + in client._transport._wrapped_methods ) - response = await client.snapshot_table(request) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_authorized_view + ] = mock_rpc + request = {} + client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() + assert mock_rpc.call_count == 1 - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_snapshot_table_async_from_dict(): - await test_snapshot_table_async(request_type=dict) +async def test_update_authorized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -def test_snapshot_table_field_headers(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + # Ensure method has been cached + assert ( + client._client._transport.update_authorized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_authorized_view + ] = mock_rpc + + request = {} + await client.update_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_authorized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.UpdateAuthorizedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_authorized_view_async_from_dict(): + await test_update_authorized_view_async(request_type=dict) + + +def test_update_authorized_view_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. 
Set these to a non-empty value. - request = bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() - request.name = "name_value" + request.authorized_view.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.snapshot_table(request) + client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -3657,28 +5426,30 @@ def test_snapshot_table_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "authorized_view.name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_snapshot_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_update_authorized_view_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() - request.name = "name_value" + request.authorized_view.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.snapshot_table(request) + await client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -3689,71 +5460,65 @@ async def test_snapshot_table_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "authorized_view.name=name_value", ) in kw["metadata"] -def test_snapshot_table_flattened(): - client = BigtableTableAdminClient( +def test_update_authorized_view_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.snapshot_table( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + client.update_authorized_view( + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. 
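The flattened `update_authorized_view` calls above pair the resource with a `field_mask_pb2.FieldMask`; the mask enumerates which fields the server should overwrite, and `paths_value` in the test is only a placeholder token. A small illustration with a plausible (unverified) AuthorizedView path:

```python
from google.protobuf import field_mask_pb2

# The mask lists the fields to overwrite; unlisted fields keep their
# server-side values. "subset_view.row_prefixes" is a plausible example
# path, not one taken from the test above.
mask = field_mask_pb2.FieldMask(paths=["subset_view.row_prefixes"])
print(list(mask.paths))  # ['subset_view.row_prefixes']
```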
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].cluster - mock_val = "cluster_value" + arg = args[0].authorized_view + mock_val = table.AuthorizedView(name="name_value") assert arg == mock_val - arg = args[0].snapshot_id - mock_val = "snapshot_id_value" - assert arg == mock_val - arg = args[0].description - mock_val = "description_value" + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val -def test_snapshot_table_flattened_error(): - client = BigtableTableAdminClient( +def test_update_authorized_view_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + client.update_authorized_view( + bigtable_table_admin.UpdateAuthorizedViewRequest(), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio -async def test_snapshot_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_update_authorized_view_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") @@ -3762,58 +5527,48 @@ async def test_snapshot_table_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.snapshot_table( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + response = await client.update_authorized_view( + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. 
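`update_authorized_view` is long-running: the mocks above return an `operations_pb2.Operation` and the tests assert the client surfaces a `future.Future`. A sketch of that handoff, with a bare `concurrent.futures.Future` standing in for `google.api_core.operation.Operation` and the polling machinery omitted:

```python
import concurrent.futures

# Sketch of the LRO handoff: the raw RPC yields an Operation proto, and the
# client returns a future-like handle for the eventual resource.
def start_lro(raw_rpc, request):
    operation_proto = raw_rpc(request)  # e.g. Operation(name="operations/spam")
    handle = concurrent.futures.Future()
    # A real client would poll operation_proto.name and eventually call
    # handle.set_result(<final AuthorizedView>).
    return handle
```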
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].cluster - mock_val = "cluster_value" - assert arg == mock_val - arg = args[0].snapshot_id - mock_val = "snapshot_id_value" + arg = args[0].authorized_view + mock_val = table.AuthorizedView(name="name_value") assert arg == mock_val - arg = args[0].description - mock_val = "description_value" + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio -async def test_snapshot_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_update_authorized_view_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + await client.update_authorized_view( + bigtable_table_admin.UpdateAuthorizedViewRequest(), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetSnapshotRequest, + bigtable_table_admin.DeleteAuthorizedViewRequest, dict, ], ) -def test_get_snapshot(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_delete_authorized_view(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3823,52 +5578,144 @@ def test_get_snapshot(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) - response = client.get_snapshot(request) + call.return_value = None + response = client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" + assert response is None -def test_get_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
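`delete_authorized_view` has a `google.protobuf.Empty` response on the wire, which Python surfaces as `None`; hence `call.return_value = None` and `assert response is None` in the tests that follow. A trivial sketch:

```python
# Sketch: RPCs whose wire response is google.protobuf.Empty surface as None
# in Python, so the mocks set call.return_value = None and the tests assert
# `response is None`.
def delete_like_rpc(stub, request):
    stub(request)  # response carries no data
    return None
```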
- client = BigtableTableAdminClient( +def test_delete_authorized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteAuthorizedViewRequest( + name="name_value", + etag="etag_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - client.get_snapshot() + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_authorized_view(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() + assert args[0] == bigtable_table_admin.DeleteAuthorizedViewRequest( + name="name_value", + etag="etag_value", + ) + + +def test_delete_authorized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_authorized_view + ] = mock_rpc + request = {} + client.delete_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_get_snapshot_async( +async def test_delete_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.GetSnapshotRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_authorized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_authorized_view + ] = mock_rpc + + request = {} + await client.delete_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_authorized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3877,51 +5724,45 @@ async def test_get_snapshot_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) - ) - response = await client.get_snapshot(request) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
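The async variants wrap every canned response in `grpc_helpers_async.FakeUnaryUnaryCall` because an async stub's `__call__` must return something awaitable, not the bare proto. A minimal stand-in showing why:

```python
# Minimal stand-in for the awaitable call object the async tests need.
class FakeCall:
    def __init__(self, response):
        self._response = response

    def __await__(self):
        if False:
            yield  # forces a generator-based awaitable
        return self._response
```

`await FakeCall(None)` then resolves to the canned response, mirroring what the real helper provides to the awaiting client code.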
- assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" + assert response is None @pytest.mark.asyncio -async def test_get_snapshot_async_from_dict(): - await test_get_snapshot_async(request_type=dict) +async def test_delete_authorized_view_async_from_dict(): + await test_delete_authorized_view_async(request_type=dict) -def test_get_snapshot_field_headers(): - client = BigtableTableAdminClient( +def test_delete_authorized_view_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value = table.Snapshot() - client.get_snapshot(request) + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value = None + client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -3937,21 +5778,23 @@ def test_get_snapshot_field_headers(): @pytest.mark.asyncio -async def test_get_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_delete_authorized_view_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) - await client.get_snapshot(request) + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -3966,18 +5809,20 @@ async def test_get_snapshot_field_headers_async(): ) in kw["metadata"] -def test_get_snapshot_flattened(): - client = BigtableTableAdminClient( +def test_delete_authorized_view_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Snapshot() + call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.get_snapshot( + client.delete_authorized_view( name="name_value", ) @@ -3990,35 +5835,37 @@ def test_get_snapshot_flattened(): assert arg == mock_val -def test_get_snapshot_flattened_error(): - client = BigtableTableAdminClient( +def test_delete_authorized_view_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), + client.delete_authorized_view( + bigtable_table_admin.DeleteAuthorizedViewRequest(), name="name_value", ) @pytest.mark.asyncio -async def test_get_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_delete_authorized_view_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Snapshot() + call.return_value = None - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_snapshot( + response = await client.delete_authorized_view( name="name_value", ) @@ -4032,16 +5879,16 @@ async def test_get_snapshot_flattened_async(): @pytest.mark.asyncio -async def test_get_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_delete_authorized_view_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), + await client.delete_authorized_view( + bigtable_table_admin.DeleteAuthorizedViewRequest(), name="name_value", ) @@ -4049,12 +5896,12 @@ async def test_get_snapshot_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListSnapshotsRequest, + bigtable_table_admin.ModifyColumnFamiliesRequest, dict, ], ) -def test_list_snapshots(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_modify_column_families(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4064,46 +5911,149 @@ def test_list_snapshots(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: # Designate an appropriate return value for the call. 
- call.return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", + call.return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) - response = client.list_snapshots(request) + response = client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True -def test_list_snapshots_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_modify_column_families_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ModifyColumnFamiliesRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - client.list_snapshots() + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.modify_column_families(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest( + name="name_value", + ) + + +def test_modify_column_families_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.modify_column_families + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.modify_column_families + ] = mock_rpc + request = {} + client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.modify_column_families(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_list_snapshots_async( +async def test_modify_column_families_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ListSnapshotsRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.modify_column_families + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.modify_column_families + ] = mock_rpc + + request = {} + await client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.modify_column_families(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_modify_column_families_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4112,45 +6062,54 @@ async def test_list_snapshots_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) ) - response = await client.list_snapshots(request) + response = await client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListSnapshotsAsyncPager) - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True @pytest.mark.asyncio -async def test_list_snapshots_async_from_dict(): - await test_list_snapshots_async(request_type=dict) +async def test_modify_column_families_async_from_dict(): + await test_modify_column_families_async(request_type=dict) -def test_list_snapshots_field_headers(): - client = BigtableTableAdminClient( +def test_modify_column_families_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSnapshotsRequest() + request = bigtable_table_admin.ModifyColumnFamiliesRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - call.return_value = bigtable_table_admin.ListSnapshotsResponse() - client.list_snapshots(request) + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = table.Table() + client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -4161,28 +6120,28 @@ def test_list_snapshots_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_list_snapshots_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_modify_column_families_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSnapshotsRequest() + request = bigtable_table_admin.ModifyColumnFamiliesRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse() - ) - await client.list_snapshots(request) + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + await client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -4193,337 +6152,269 @@ async def test_list_snapshots_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] -def test_list_snapshots_flattened(): - client = BigtableTableAdminClient( +def test_modify_column_families_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse() + call.return_value = table.Table() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_snapshots( - parent="parent_value", + client.modify_column_families( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].modifications + mock_val = [ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") + ] assert arg == mock_val -def test_list_snapshots_flattened_error(): - client = BigtableTableAdminClient( +def test_modify_column_families_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) @pytest.mark.asyncio -async def test_list_snapshots_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_modify_column_families_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse() + call.return_value = table.Table() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse() - ) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_snapshots( - parent="parent_value", + response = await client.modify_column_families( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) # Establish that the underlying call was made with the expected # request object values. 
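In the flattened `modify_column_families` calls above, `modifications` is a repeated message field passed as a plain list; the generated client copies it wholesale onto the request, which is why the asserted `args[0].modifications` equals the list the caller passed in. A dict-shaped sketch; a real proto would use `request.modifications.extend(modifications)`:

```python
# Sketch of how a flattened repeated field folds into the request.
def build_modify_request(name, modifications):
    return {"name": name, "modifications": list(modifications)}
```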
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].modifications + mock_val = [ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") + ] assert arg == mock_val @pytest.mark.asyncio -async def test_list_snapshots_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_modify_column_families_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", + await client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) -def test_list_snapshots_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DropRowRangeRequest, + dict, + ], +) +def test_drop_row_range(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.drop_row_range(request) - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_snapshots(request={}) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DropRowRangeRequest() + assert args[0] == request - assert pager._metadata == metadata + # Establish that the response is the type that we expect. + assert response is None - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) for i in results) +def test_drop_row_range_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) -def test_list_snapshots_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, - transport=transport_name, + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DropRowRangeRequest( + name="name_value", ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.drop_row_range(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DropRowRangeRequest( + name="name_value", ) - pages = list(client.list_snapshots(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_snapshots_async_pager(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, +def test_drop_row_range_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - async_pager = await client.list_snapshots( - request={}, + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.drop_row_range in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
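# The *_non_empty_request_with_auto_populated_field tests above are generator
# failsafes for AIP-4235: request fields annotated as auto-populated
# (typically request_id) must receive a UUID4 when the caller leaves them
# unset. A rough sketch of that rule, with a hypothetical dict request and a
# hypothetical "request_id" field:

import uuid

def populate_request_id(request: dict) -> dict:
    # Fill the field only when the caller did not set it explicitly.
    if not request.get("request_id"):
        request["request_id"] = str(uuid.uuid4())
    return request

req = populate_request_id({"name": "name_value"})
assert uuid.UUID(req["request_id"]).version == 4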
) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) + client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc + request = {} + client.drop_row_range(request) - assert len(responses) == 6 - assert all(isinstance(i, table.Snapshot) for i in responses) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.drop_row_range(request) -@pytest.mark.asyncio -async def test_list_snapshots_async_pages(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, + +@pytest.mark.asyncio +async def test_drop_row_range_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_snapshots(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteSnapshotRequest, - dict, - ], -) -def test_delete_snapshot(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + # Ensure method has been cached + assert ( + client._client._transport.drop_row_range + in client._client._transport._wrapped_methods + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.drop_row_range + ] = mock_rpc - # Mock the actual call within the gRPC stub, and fake the request. 
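# The *_use_cached_wrapped_rpc tests above assert that each RPC is wrapped
# exactly once (in _prep_wrapped_messages, at client construction) and that
# later calls reuse the cached wrapper. A minimal sketch of that caching
# pattern, with hypothetical names rather than the real transport internals:

wrap_count = 0

def wrap_method(fn):
    global wrap_count
    wrap_count += 1
    return fn

class TransportSketch:
    def __init__(self, stubs):
        # Wrap every stub up front so per-call overhead stays flat.
        self._wrapped_methods = {fn: wrap_method(fn) for fn in stubs.values()}

    def call(self, stub, request):
        # Reuse the cached wrapper; wrap_method is not invoked again.
        return self._wrapped_methods[stub](request)

transport = TransportSketch({"drop_row_range": len})
transport.call(len, "abc")
transport.call(len, "abcd")
assert wrap_count == 1  # wrapped once at construction, reused per call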
- with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_snapshot(request) + request = {} + await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() - - # Establish that the response is the type that we expect. - assert response is None + assert mock_rpc.call_count == 1 + await client.drop_row_range(request) -def test_delete_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - client.delete_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_delete_snapshot_async( +async def test_drop_row_range_async( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteSnapshotRequest, + request_type=bigtable_table_admin.DropRowRangeRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4532,40 +6423,41 @@ async def test_delete_snapshot_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_snapshot(request) + response = await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + request = bigtable_table_admin.DropRowRangeRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @pytest.mark.asyncio -async def test_delete_snapshot_async_from_dict(): - await test_delete_snapshot_async(request_type=dict) +async def test_drop_row_range_async_from_dict(): + await test_drop_row_range_async(request_type=dict) -def test_delete_snapshot_field_headers(): - client = BigtableTableAdminClient( +def test_drop_row_range_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSnapshotRequest() + request = bigtable_table_admin.DropRowRangeRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. 
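# In the async tests above, mocked stub results are wrapped in
# grpc_helpers_async.FakeUnaryUnaryCall so that awaiting the call behaves
# like a real unary-unary grpc.aio invocation. A small standalone sketch of
# the same mocking idiom outside a client:

import asyncio
from unittest import mock

from google.api_core import grpc_helpers_async

async def main():
    call = mock.Mock()
    call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
    # Awaiting the fake call resolves to the wrapped response (here: None).
    response = await call()
    assert response is None

asyncio.run(main())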
- with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: call.return_value = None - client.delete_snapshot(request) + client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -4581,21 +6473,21 @@ def test_delete_snapshot_field_headers(): @pytest.mark.asyncio -async def test_delete_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_drop_row_range_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSnapshotRequest() + request = bigtable_table_admin.DropRowRangeRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_snapshot(request) + await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -4610,141 +6502,163 @@ async def test_delete_snapshot_field_headers_async(): ) in kw["metadata"] -def test_delete_snapshot_flattened(): - client = BigtableTableAdminClient( +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GenerateConsistencyTokenRequest, + dict, + ], +) +def test_generate_consistency_token(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_snapshot( - name="name_value", + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", ) + response = client.generate_consistency_token(request) - # Establish that the underlying call was made with the expected - # request object values. + # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == "consistency_token_value" -def test_delete_snapshot_flattened_error(): - client = BigtableTableAdminClient( +def test_generate_consistency_token_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GenerateConsistencyTokenRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.generate_consistency_token(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest( name="name_value", ) -@pytest.mark.asyncio -async def test_delete_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) +def test_generate_consistency_token_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_snapshot( - name="name_value", + # Ensure method has been cached + assert ( + client._transport.generate_consistency_token + in client._transport._wrapped_methods ) - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_consistency_token + ] = mock_rpc + request = {} + client.generate_consistency_token(request) + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 -@pytest.mark.asyncio -async def test_delete_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) + client.generate_consistency_token(request) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), - name="name_value", - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateBackupRequest, - dict, - ], -) -def test_create_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +@pytest.mark.asyncio +async def test_generate_consistency_token_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_backup(request) + # Ensure method has been cached + assert ( + client._client._transport.generate_consistency_token + in client._client._transport._wrapped_methods + ) - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.generate_consistency_token + ] = mock_rpc - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + request = {} + await client.generate_consistency_token(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -def test_create_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + await client.generate_consistency_token(request) - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - client.create_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_create_backup_async( +async def test_generate_consistency_token_async( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CreateBackupRequest, + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4753,42 +6667,50 @@ async def test_create_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) ) - response = await client.create_backup(request) + response = await client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == "consistency_token_value" @pytest.mark.asyncio -async def test_create_backup_async_from_dict(): - await test_create_backup_async(request_type=dict) +async def test_generate_consistency_token_async_from_dict(): + await test_generate_consistency_token_async(request_type=dict) -def test_create_backup_field_headers(): - client = BigtableTableAdminClient( +def test_generate_consistency_token_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateBackupRequest() + request = bigtable_table_admin.GenerateConsistencyTokenRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_backup(request) + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 @@ -4799,28 +6721,30 @@ def test_create_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_create_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_generate_consistency_token_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateBackupRequest() + request = bigtable_table_admin.GenerateConsistencyTokenRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") + bigtable_table_admin.GenerateConsistencyTokenResponse() ) - await client.create_backup(request) + await client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -4831,121 +6755,105 @@ async def test_create_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] -def test_create_backup_flattened(): - client = BigtableTableAdminClient( +def test_generate_consistency_token_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.create_backup( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + client.generate_consistency_token( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].backup - mock_val = table.Backup(name="name_value") + arg = args[0].name + mock_val = "name_value" assert arg == mock_val -def test_create_backup_flattened_error(): - client = BigtableTableAdminClient( +def test_generate_consistency_token_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", ) @pytest.mark.asyncio -async def test_create_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_generate_consistency_token_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + bigtable_table_admin.GenerateConsistencyTokenResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.create_backup( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + response = await client.generate_consistency_token( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].backup - mock_val = table.Backup(name="name_value") + arg = args[0].name + mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio -async def test_create_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_generate_consistency_token_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + await client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetBackupRequest, + bigtable_table_admin.CheckConsistencyRequest, dict, ], ) -def test_get_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_check_consistency(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4955,53 +6863,144 @@ def test_get_backup(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, + call.return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, ) - response = client.get_backup(request) + response = client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CheckConsistencyRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True -def test_get_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_check_consistency_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - client.get_backup() + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.check_consistency(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() + assert args[0] == bigtable_table_admin.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) -@pytest.mark.asyncio -async def test_get_backup_async( - transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest +def test_check_consistency_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.check_consistency in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.check_consistency + ] = mock_rpc + request = {} + client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.check_consistency(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_check_consistency_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.check_consistency + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.check_consistency + ] = mock_rpc + + request = {} + await client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.check_consistency(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_check_consistency_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CheckConsistencyRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5010,53 +7009,50 @@ async def test_get_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, + bigtable_table_admin.CheckConsistencyResponse( + consistent=True, ) ) - response = await client.get_backup(request) + response = await client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CheckConsistencyRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True @pytest.mark.asyncio -async def test_get_backup_async_from_dict(): - await test_get_backup_async(request_type=dict) +async def test_check_consistency_async_from_dict(): + await test_check_consistency_async(request_type=dict) -def test_get_backup_field_headers(): - client = BigtableTableAdminClient( +def test_check_consistency_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CheckConsistencyRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value = table.Backup() - client.get_backup(request) + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5072,21 +7068,25 @@ def test_get_backup_field_headers(): @pytest.mark.asyncio -async def test_get_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_check_consistency_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CheckConsistencyRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.get_backup(request) + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse() + ) + await client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -5101,19 +7101,22 @@ async def test_get_backup_field_headers_async(): ) in kw["metadata"] -def test_get_backup_flattened(): - client = BigtableTableAdminClient( +def test_check_consistency_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup() + call.return_value = bigtable_table_admin.CheckConsistencyResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_backup( + client.check_consistency( name="name_value", + consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -5123,38 +7126,47 @@ def test_get_backup_flattened(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].consistency_token + mock_val = "consistency_token_value" + assert arg == mock_val -def test_get_backup_flattened_error(): - client = BigtableTableAdminClient( +def test_check_consistency_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), + client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), name="name_value", + consistency_token="consistency_token_value", ) @pytest.mark.asyncio -async def test_get_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_check_consistency_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup() + call.return_value = bigtable_table_admin.CheckConsistencyResponse() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse() + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
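# Taken together, the GenerateConsistencyToken and CheckConsistency tests
# above mirror the intended replication check: mint a token for a table, then
# poll check_consistency with it until the table reports consistent. A hedged
# sketch of that loop against a hypothetical synchronous client variable
# (the poll interval and helper name are illustrative, not from this PR):

import time

def wait_for_replication(client, table_name, poll_seconds=5.0):
    token = client.generate_consistency_token(name=table_name).consistency_token
    # Writes issued before the token was created are fully replicated once
    # check_consistency reports consistent=True.
    while not client.check_consistency(
        name=table_name, consistency_token=token
    ).consistent:
        time.sleep(poll_seconds)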
- response = await client.get_backup( + response = await client.check_consistency( name="name_value", + consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -5164,32 +7176,36 @@ async def test_get_backup_flattened_async(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].consistency_token + mock_val = "consistency_token_value" + assert arg == mock_val @pytest.mark.asyncio -async def test_get_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_check_consistency_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.get_backup( - bigtable_table_admin.GetBackupRequest(), + await client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), name="name_value", + consistency_token="consistency_token_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UpdateBackupRequest, + bigtable_table_admin.SnapshotTableRequest, dict, ], ) -def test_update_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_snapshot_table(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5199,54 +7215,149 @@ def test_update_backup(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - ) - response = client.update_backup(request) + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() + request = bigtable_table_admin.SnapshotTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + assert isinstance(response, future.Future) -def test_update_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_snapshot_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - client.update_backup() + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.snapshot_table(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() + assert args[0] == bigtable_table_admin.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + +def test_snapshot_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.snapshot_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc + request = {} + client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.snapshot_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_update_backup_async( +async def test_snapshot_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.UpdateBackupRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.snapshot_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.snapshot_table + ] = mock_rpc + + request = {} + await client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.snapshot_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_snapshot_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.SnapshotTableRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5255,53 +7366,43 @@ async def test_update_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - ) + operations_pb2.Operation(name="operations/spam") ) - response = await client.update_backup(request) + response = await client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() + request = bigtable_table_admin.SnapshotTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + assert isinstance(response, future.Future) @pytest.mark.asyncio -async def test_update_backup_async_from_dict(): - await test_update_backup_async(request_type=dict) +async def test_snapshot_table_async_from_dict(): + await test_snapshot_table_async(request_type=dict) -def test_update_backup_field_headers(): - client = BigtableTableAdminClient( +def test_snapshot_table_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateBackupRequest() + request = bigtable_table_admin.SnapshotTableRequest() - request.backup.name = "name_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value = table.Backup() - client.update_backup(request) + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5312,26 +7413,28 @@ def test_update_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "backup.name=name_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_update_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_snapshot_table_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateBackupRequest() + request = bigtable_table_admin.SnapshotTableRequest() - request.backup.name = "name_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.update_backup(request) + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -5342,109 +7445,131 @@ async def test_update_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "backup.name=name_value", + "name=name_value", ) in kw["metadata"] -def test_update_backup_flattened(): - client = BigtableTableAdminClient( +def test_snapshot_table_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
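# snapshot_table is a long-running operation: the stub returns an
# operations_pb2.Operation, and the client wraps it in a google.api_core
# future (hence `assert isinstance(response, future.Future)` above). A hedged
# usage sketch -- the import path and client class are assumed from the
# public bigtable_admin_v2 surface, the resource names are placeholders, and
# running it needs real credentials:

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
operation = client.snapshot_table(
    name="projects/p/instances/i/tables/t",
    cluster="projects/p/instances/i/clusters/c",
    snapshot_id="snap",
    description="ad-hoc snapshot",
)
# result() polls the wrapped operation until it completes, then returns the
# resulting table.Snapshot message (or raises the operation's error).
snapshot = operation.result(timeout=300)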
- with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup() + call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.update_backup( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.snapshot_table( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].backup - mock_val = table.Backup(name="name_value") + arg = args[0].name + mock_val = "name_value" assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].cluster + mock_val = "cluster_value" + assert arg == mock_val + arg = args[0].snapshot_id + mock_val = "snapshot_id_value" + assert arg == mock_val + arg = args[0].description + mock_val = "description_value" assert arg == mock_val -def test_update_backup_flattened_error(): - client = BigtableTableAdminClient( +def test_snapshot_table_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) @pytest.mark.asyncio -async def test_update_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_snapshot_table_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup() + call.return_value = operations_pb2.Operation(name="operations/op") - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.update_backup( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + response = await client.snapshot_table( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].backup - mock_val = table.Backup(name="name_value") + arg = args[0].name + mock_val = "name_value" assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].cluster + mock_val = "cluster_value" + assert arg == mock_val + arg = args[0].snapshot_id + mock_val = "snapshot_id_value" + assert arg == mock_val + arg = args[0].description + mock_val = "description_value" assert arg == mock_val @pytest.mark.asyncio -async def test_update_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_snapshot_table_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + await client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteBackupRequest, + bigtable_table_admin.GetSnapshotRequest, dict, ], ) -def test_delete_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_get_snapshot(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5454,43 +7579,142 @@ def test_delete_backup(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_backup(request) + call.return_value = table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + response = client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() + request = bigtable_table_admin.GetSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert response is None + assert isinstance(response, table.Snapshot) + assert response.name == "name_value" + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == "description_value" -def test_delete_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_get_snapshot_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetSnapshotRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - client.delete_backup() + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_snapshot(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() + assert args[0] == bigtable_table_admin.GetSnapshotRequest( + name="name_value", + ) + + +def test_get_snapshot_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_snapshot in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc + request = {} + client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_delete_backup_async( +async def test_get_snapshot_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteBackupRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_snapshot + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_snapshot + ] = mock_rpc + + request = {} + await client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + await client.get_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_snapshot_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GetSnapshotRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5499,40 +7723,52 @@ async def test_delete_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_backup(request) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + ) + response = await client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() + request = bigtable_table_admin.GetSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert response is None + assert isinstance(response, table.Snapshot) + assert response.name == "name_value" + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == "description_value" @pytest.mark.asyncio -async def test_delete_backup_async_from_dict(): - await test_delete_backup_async(request_type=dict) +async def test_get_snapshot_async_from_dict(): + await test_get_snapshot_async(request_type=dict) -def test_delete_backup_field_headers(): - client = BigtableTableAdminClient( +def test_get_snapshot_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteBackupRequest() + request = bigtable_table_admin.GetSnapshotRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value = None - client.delete_backup(request) + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = table.Snapshot() + client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5548,21 +7784,21 @@ def test_delete_backup_field_headers(): @pytest.mark.asyncio -async def test_delete_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_snapshot_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
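The field-header tests in this file all assert on the same mechanism: request fields that appear in the method's URI template are mirrored into an x-goog-request-params metadata entry. The helper involved is part of google.api_core (it also appears later in this diff), so the expected tuple can be reproduced directly:

from google.api_core.gapic_v1 import routing_header

# Build the metadata entry the tests look for in kw["metadata"].
params = routing_header.to_grpc_metadata((("name", "name_value"),))
assert params == ("x-goog-request-params", "name=name_value")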
- request = bigtable_table_admin.DeleteBackupRequest() + request = bigtable_table_admin.GetSnapshotRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_backup(request) + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) + await client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -5577,18 +7813,18 @@ async def test_delete_backup_field_headers_async(): ) in kw["metadata"] -def test_delete_backup_flattened(): - client = BigtableTableAdminClient( +def test_get_snapshot_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = None + call.return_value = table.Snapshot() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_backup( + client.get_snapshot( name="name_value", ) @@ -5601,35 +7837,35 @@ def test_delete_backup_flattened(): assert arg == mock_val -def test_delete_backup_flattened_error(): - client = BigtableTableAdminClient( +def test_get_snapshot_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), + client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), name="name_value", ) @pytest.mark.asyncio -async def test_delete_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_snapshot_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = None + call.return_value = table.Snapshot() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- response = await client.delete_backup( + response = await client.get_snapshot( name="name_value", ) @@ -5643,16 +7879,16 @@ async def test_delete_backup_flattened_async(): @pytest.mark.asyncio -async def test_delete_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_snapshot_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), + await client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), name="name_value", ) @@ -5660,12 +7896,12 @@ async def test_delete_backup_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListBackupsRequest, + bigtable_table_admin.ListSnapshotsRequest, dict, ], ) -def test_list_backups(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_list_snapshots(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5675,46 +7911,138 @@ def test_list_backups(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse( + call.return_value = bigtable_table_admin.ListSnapshotsResponse( next_page_token="next_page_token_value", ) - response = client.list_backups(request) + response = client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() + request = bigtable_table_admin.ListSnapshotsRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsPager) + assert isinstance(response, pagers.ListSnapshotsPager) assert response.next_page_token == "next_page_token_value" -def test_list_backups_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_list_snapshots_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListSnapshotsRequest( + parent="parent_value", + page_token="page_token_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - client.list_backups() + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_snapshots(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() + assert args[0] == bigtable_table_admin.ListSnapshotsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_snapshots_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_snapshots in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc + request = {} + client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_snapshots(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_list_backups_async( +async def test_list_snapshots_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ListBackupsRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_snapshots + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_snapshots + ] = mock_rpc + + request = {} + await client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. 
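All of the *_use_cached_wrapped_rpc tests added in this diff exercise one idea: wrap_method is applied to every stub exactly once, at client construction, and per-call dispatch is just a dictionary lookup. A minimal pure-Python sketch of that pattern, with illustrative names rather than the real transport API:

def wrap_method(fn):
    # stand-in for google.api_core.gapic_v1.method.wrap_method, which layers
    # retry, timeout, and metadata handling over a bare stub method
    def wrapped(request):
        return fn(request)
    return wrapped

class Transport:
    def __init__(self, stub_methods):
        # one wrapper per RPC, built up front as in _prep_wrapped_messages
        self._wrapped_methods = {fn: wrap_method(fn) for fn in stub_methods}

    def call(self, stub_fn, request):
        # per-call path: no new wrappers are created
        return self._wrapped_methods[stub_fn](request)

transport = Transport([lambda request: "ok"])
(stub,) = transport._wrapped_methods
assert transport.call(stub, {}) == "ok"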
+ assert mock_rpc.call_count == 1 + + await client.list_snapshots(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_snapshots_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListSnapshotsRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5723,45 +8051,46 @@ async def test_list_backups_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse( + bigtable_table_admin.ListSnapshotsResponse( next_page_token="next_page_token_value", ) ) - response = await client.list_backups(request) + response = await client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() + request = bigtable_table_admin.ListSnapshotsRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsAsyncPager) + assert isinstance(response, pagers.ListSnapshotsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio -async def test_list_backups_async_from_dict(): - await test_list_backups_async(request_type=dict) +async def test_list_snapshots_async_from_dict(): + await test_list_snapshots_async(request_type=dict) -def test_list_backups_field_headers(): - client = BigtableTableAdminClient( +def test_list_snapshots_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListBackupsRequest() + request = bigtable_table_admin.ListSnapshotsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - call.return_value = bigtable_table_admin.ListBackupsResponse() - client.list_backups(request) + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5777,23 +8106,23 @@ def test_list_backups_field_headers(): @pytest.mark.asyncio -async def test_list_backups_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_list_snapshots_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.ListBackupsRequest() + request = bigtable_table_admin.ListSnapshotsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse() + bigtable_table_admin.ListSnapshotsResponse() ) - await client.list_backups(request) + await client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -5808,18 +8137,18 @@ async def test_list_backups_field_headers_async(): ) in kw["metadata"] -def test_list_backups_flattened(): - client = BigtableTableAdminClient( +def test_list_snapshots_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse() + call.return_value = bigtable_table_admin.ListSnapshotsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_backups( + client.list_snapshots( parent="parent_value", ) @@ -5832,37 +8161,37 @@ def test_list_backups_flattened(): assert arg == mock_val -def test_list_backups_flattened_error(): - client = BigtableTableAdminClient( +def test_list_snapshots_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), + client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value", ) @pytest.mark.asyncio -async def test_list_backups_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_list_snapshots_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse() + call.return_value = bigtable_table_admin.ListSnapshotsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse() + bigtable_table_admin.ListSnapshotsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- response = await client.list_backups( + response = await client.list_snapshots( parent="parent_value", ) @@ -5876,150 +8205,154 @@ async def test_list_backups_flattened_async(): @pytest.mark.asyncio -async def test_list_backups_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_list_snapshots_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.list_backups( - bigtable_table_admin.ListBackupsRequest(), + await client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value", ) -def test_list_backups_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, +def test_list_snapshots_pager(transport_name: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), ], next_page_token="abc", ), - bigtable_table_admin.ListBackupsResponse( - backups=[], + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), ], next_page_token="ghi", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), ], ), RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_backups(request={}) + pager = client.list_snapshots(request={}, retry=retry, timeout=timeout) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 - assert all(isinstance(i, table.Backup) for i in results) + assert all(isinstance(i, table.Snapshot) for i in results) -def test_list_backups_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, +def test_list_snapshots_pages(transport_name: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Set the response to a series of pages. 
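The pager tests around this point distinguish two iteration modes: iterating the pager itself yields individual items across page boundaries, while .pages yields the raw responses whose next_page_token values the pages test inspects. A toy model of that behavior, standing in for pagers.ListSnapshotsPager:

class ToyPager:
    # illustrative stand-in, not the real pager class
    def __init__(self, responses):
        self._responses = responses

    @property
    def pages(self):
        # raw per-page responses, each carrying its next_page_token
        yield from self._responses

    def __iter__(self):
        # flattened iteration over items across all pages
        for page in self._responses:
            yield from page["snapshots"]

pager = ToyPager([
    {"snapshots": ["a", "b", "c"], "next_page_token": "abc"},
    {"snapshots": [], "next_page_token": "def"},
    {"snapshots": ["d"], "next_page_token": ""},
])
assert list(pager) == ["a", "b", "c", "d"]
assert [p["next_page_token"] for p in pager.pages] == ["abc", "def", ""]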
call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), ], next_page_token="abc", ), - bigtable_table_admin.ListBackupsResponse( - backups=[], + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), ], next_page_token="ghi", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), ], ), RuntimeError, ) - pages = list(client.list_backups(request={}).pages) + pages = list(client.list_snapshots(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio -async def test_list_backups_async_pager(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, +async def test_list_snapshots_async_pager(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), ], next_page_token="abc", ), - bigtable_table_admin.ListBackupsResponse( - backups=[], + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), ], next_page_token="ghi", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), ], ), RuntimeError, ) - async_pager = await client.list_backups( + async_pager = await client.list_snapshots( request={}, ) assert async_pager.next_page_token == "abc" @@ -6028,43 +8361,43 @@ async def test_list_backups_async_pager(): responses.append(response) assert len(responses) == 6 - assert all(isinstance(i, table.Backup) for i in responses) + assert all(isinstance(i, table.Snapshot) for i in responses) @pytest.mark.asyncio -async def test_list_backups_async_pages(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, +async def test_list_snapshots_async_pages(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), ], next_page_token="abc", ), - bigtable_table_admin.ListBackupsResponse( - backups=[], + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), ], next_page_token="ghi", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), ], ), RuntimeError, @@ -6073,7 +8406,7 @@ async def test_list_backups_async_pages(): # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 async for page_ in ( # pragma: no branch - await client.list_backups(request={}) + await client.list_snapshots(request={}) ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): @@ -6083,12 +8416,12 @@ async def test_list_backups_async_pages(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.RestoreTableRequest, + bigtable_table_admin.DeleteSnapshotRequest, dict, ], ) -def test_restore_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_delete_snapshot(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -6098,43 +8431,133 @@ def test_restore_table(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.restore_table(request) + call.return_value = None + response = client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + assert response is None -def test_restore_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_delete_snapshot_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = bigtable_table_admin.DeleteSnapshotRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - client.restore_table() + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_snapshot(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest( + name="name_value", + ) + + +def test_delete_snapshot_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_snapshot in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc + request = {} + client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_restore_table_async( +async def test_delete_snapshot_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.RestoreTableRequest, ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_snapshot + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_snapshot + ] = mock_rpc + + request = {} + await client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + await client.delete_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_snapshot_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteSnapshotRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6143,42 +8566,41 @@ async def test_restore_table_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.restore_table(request) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + assert response is None @pytest.mark.asyncio -async def test_restore_table_async_from_dict(): - await test_restore_table_async(request_type=dict) +async def test_delete_snapshot_async_from_dict(): + await test_delete_snapshot_async(request_type=dict) -def test_restore_table_field_headers(): - client = BigtableTableAdminClient( +def test_delete_snapshot_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.RestoreTableRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.restore_table(request) + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = None + client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -6189,28 +8611,26 @@ def test_restore_table_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_restore_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_delete_snapshot_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
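A note on the grpc_helpers_async.FakeUnaryUnaryCall wrapper used throughout the async tests, including the hunk below: an async stub returns a call object that must be awaited to produce the response, so a bare mock return value would fail at the await point. A self-contained equivalent of what the fake provides:

import asyncio

class FakeCall:
    # minimal awaitable resolving to a canned response, in the spirit of
    # FakeUnaryUnaryCall (a simplified sketch, not the api_core class)
    def __init__(self, response=None):
        self._response = response

    def __await__(self):
        async def _resolve():
            return self._response
        return _resolve().__await__()

async def main():
    assert await FakeCall("response") == "response"

asyncio.run(main())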
- request = bigtable_table_admin.RestoreTableRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.restore_table(request) + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -6221,19 +8641,99 @@ async def test_restore_table_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] +def test_delete_snapshot_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_snapshot( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_snapshot_flattened_error(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_snapshot_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_snapshot( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_snapshot_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
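The rule the flattened_error tests pin down is enforced by a simple guard in the generated client: a request object and flattened keyword fields are mutually exclusive. A minimal sketch of that check, with a pared-down signature for illustration:

def delete_snapshot(request=None, *, name=None):
    # sketch of the generated guard: any flattened field set alongside a
    # request object raises before the RPC is attempted
    if request is not None and name is not None:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    return request or {"name": name}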
+ with pytest.raises(ValueError): + await client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", + ) + + @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CopyBackupRequest, + bigtable_table_admin.CreateBackupRequest, dict, ], ) -def test_copy_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_create_backup(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -6243,42 +8743,145 @@ def test_copy_backup(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.copy_backup(request) + response = client.create_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.CreateBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_copy_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_create_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - client.copy_backup() + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_backup(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() + assert args[0] == bigtable_table_admin.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + ) + + +def test_create_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc + request = {} + client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_copy_backup_async( - transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest +async def test_create_backup_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_backup + ] = mock_rpc + + request = {} + await client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateBackupRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6287,42 +8890,43 @@ async def test_copy_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.copy_backup(request) + response = await client.create_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.CreateBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio -async def test_copy_backup_async_from_dict(): - await test_copy_backup_async(request_type=dict) +async def test_create_backup_async_from_dict(): + await test_create_backup_async(request_type=dict) -def test_copy_backup_field_headers(): - client = BigtableTableAdminClient( +def test_create_backup_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.CreateBackupRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.copy_backup(request) + client.create_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -6338,23 +8942,23 @@ def test_copy_backup_field_headers(): @pytest.mark.asyncio -async def test_copy_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_create_backup_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.CreateBackupRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.copy_backup(request) + await client.create_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -6369,22 +8973,21 @@ async def test_copy_backup_field_headers_async(): ) in kw["metadata"] -def test_copy_backup_flattened(): - client = BigtableTableAdminClient( +def test_create_backup_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.copy_backup( + client.create_backup( parent="parent_value", backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + backup=table.Backup(name="name_value"), ) # Establish that the underlying call was made with the expected @@ -6397,39 +9000,35 @@ def test_copy_backup_flattened(): arg = args[0].backup_id mock_val = "backup_id_value" assert arg == mock_val - arg = args[0].source_backup - mock_val = "source_backup_value" + arg = args[0].backup + mock_val = table.Backup(name="name_value") assert arg == mock_val - assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( - seconds=751 - ) -def test_copy_backup_flattened_error(): - client = BigtableTableAdminClient( +def test_create_backup_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), + client.create_backup( + bigtable_table_admin.CreateBackupRequest(), parent="parent_value", backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + backup=table.Backup(name="name_value"), ) @pytest.mark.asyncio -async def test_copy_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_create_backup_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") @@ -6438,11 +9037,10 @@ async def test_copy_backup_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
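
# A hedged sketch of the routing contract the field_headers tests above pin
# down: any request field bound into the HTTP/1.1 URI is echoed in the
# x-goog-request-params metadata entry. routing_metadata() is illustrative
# shorthand, not the generated client's actual helper.
from urllib.parse import quote


def routing_metadata(**fields):
    params = "&".join(f"{k}={quote(str(v))}" for k, v in fields.items())
    return ("x-goog-request-params", params)


# Mirrors the tuple the tests look for in kw["metadata"].
assert routing_metadata(parent="parent_value") == (
    "x-goog-request-params",
    "parent=parent_value",
)
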
- response = await client.copy_backup( + response = await client.create_backup( parent="parent_value", backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + backup=table.Backup(name="name_value"), ) # Establish that the underlying call was made with the expected @@ -6455,41 +9053,37 @@ async def test_copy_backup_flattened_async(): arg = args[0].backup_id mock_val = "backup_id_value" assert arg == mock_val - arg = args[0].source_backup - mock_val = "source_backup_value" + arg = args[0].backup + mock_val = table.Backup(name="name_value") assert arg == mock_val - assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( - seconds=751 - ) @pytest.mark.asyncio -async def test_copy_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_create_backup_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), + await client.create_backup( + bigtable_table_admin.CreateBackupRequest(), parent="parent_value", backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + backup=table.Backup(name="name_value"), ) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.GetIamPolicyRequest, + bigtable_table_admin.GetBackupRequest, dict, ], ) -def test_get_iam_policy(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_get_backup(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -6499,47 +9093,143 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", + call.return_value = table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) - response = client.get_iam_policy(request) + response = client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.GetBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
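
# Sketch of the flattened-argument contract the *_flattened_error tests pin
# down: a client method accepts either a request object or individual
# ("flattened") keyword fields, never both. _check_flattened is a
# hypothetical stand-in for the check the generated clients perform inline.
def _check_flattened(request=None, *, parent=None, backup_id=None, backup=None):
    has_flattened = any(v is not None for v in (parent, backup_id, backup))
    if request is not None and has_flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
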
-    assert isinstance(response, policy_pb2.Policy)
-    assert response.version == 774
-    assert response.etag == b"etag_blob"
+    assert isinstance(response, table.Backup)
+    assert response.name == "name_value"
+    assert response.source_table == "source_table_value"
+    assert response.source_backup == "source_backup_value"
+    assert response.size_bytes == 1089
+    assert response.state == table.Backup.State.CREATING
+    assert response.backup_type == table.Backup.BackupType.STANDARD


-def test_get_iam_policy_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = BigtableTableAdminClient(
+def test_get_backup_non_empty_request_with_auto_populated_field():
+    # This test is a coverage failsafe to make sure that UUID4 fields are
+    # automatically populated, according to AIP-4235, with non-empty requests.
+    client = BaseBigtableTableAdminClient(
         credentials=ga_credentials.AnonymousCredentials(),
         transport="grpc",
     )

+    # Populate all string fields in the request which are not UUID4,
+    # since we want to check that UUID4 fields are populated automatically
+    # if they meet the requirements of AIP-4235.
+    request = bigtable_table_admin.GetBackupRequest(
+        name="name_value",
+    )
+
     # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
-        client.get_iam_policy()
+    with mock.patch.object(type(client.transport.get_backup), "__call__") as call:
+        call.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client.get_backup(request=request)
         call.assert_called()
         _, args, _ = call.mock_calls[0]
-        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
+        assert args[0] == bigtable_table_admin.GetBackupRequest(
+            name="name_value",
+        )
+
+
+def test_get_backup_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call.
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = BaseBigtableTableAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="grpc",
+        )
+
+        # Should wrap all calls on client creation.
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure the method has been cached.
+        assert client._transport.get_backup in client._transport._wrapped_methods
+
+        # Replace the cached wrapped function with a mock.
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc
+        request = {}
+        client.get_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
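
# A rough illustration of the AIP-4235 behavior the
# *_non_empty_request_with_auto_populated_field tests reference: request
# fields annotated as auto-populated (typically a request_id) are filled with
# a UUID4 when the caller leaves them empty. autopopulate() is illustrative
# only; the Backup requests in these tests may not declare such a field.
import uuid


def autopopulate(request: dict, fields=("request_id",)) -> dict:
    for field in fields:
        if not request.get(field):
            request[field] = str(uuid.uuid4())
    return request


assert autopopulate({})["request_id"]  # an empty field gets a fresh UUID4
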
+ assert mock_rpc.call_count == 1 + + client.get_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_get_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest +async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_backup + ] = mock_rpc + + request = {} + await client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6548,47 +9238,56 @@ async def test_get_iam_policy_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) - response = await client.get_iam_policy(request) + response = await client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.GetBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.asyncio -async def test_get_iam_policy_async_from_dict(): - await test_get_iam_policy_async(request_type=dict) +async def test_get_backup_async_from_dict(): + await test_get_backup_async(request_type=dict) -def test_get_iam_policy_field_headers(): - client = BigtableTableAdminClient( +def test_get_backup_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.GetBackupRequest() - request.resource = "resource_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request) + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = table.Backup() + client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -6599,26 +9298,26 @@ def test_get_iam_policy_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_get_iam_policy_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_backup_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.GetBackupRequest() - request.resource = "resource_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.get_iam_policy(request) + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + await client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -6629,116 +9328,99 @@ async def test_get_iam_policy_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "name=name_value", ) in kw["metadata"] -def test_get_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. 
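
# The `assert args[0] == request` checks above lean on proto message value
# equality: two messages with identical field values compare equal, so a
# freshly constructed request matches the default-initialized one the client
# sent. The import path below is assumed from the bigtable_admin_v2 package.
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

assert bigtable_table_admin.GetBackupRequest() == (
    bigtable_table_admin.GetBackupRequest()
)
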
- with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.get_iam_policy( - request={ - "resource": "resource_value", - "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), - } - ) - call.assert_called() - - -def test_get_iam_policy_flattened(): - client = BigtableTableAdminClient( +def test_get_backup_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() + call.return_value = table.Backup() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_iam_policy( - resource="resource_value", + client.get_backup( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].name + mock_val = "name_value" assert arg == mock_val -def test_get_iam_policy_flattened_error(): - client = BigtableTableAdminClient( +def test_get_backup_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", + client.get_backup( + bigtable_table_admin.GetBackupRequest(), + name="name_value", ) @pytest.mark.asyncio -async def test_get_iam_policy_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_backup_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() + call.return_value = table.Backup() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_iam_policy( - resource="resource_value", + response = await client.get_backup( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].name + mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio -async def test_get_iam_policy_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_get_backup_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", + await client.get_backup( + bigtable_table_admin.GetBackupRequest(), + name="name_value", ) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.SetIamPolicyRequest, + bigtable_table_admin.UpdateBackupRequest, dict, ], ) -def test_set_iam_policy(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_update_backup(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -6748,47 +9430,142 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - response = client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 + call.return_value = table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + response = client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.UpdateBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD -def test_set_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_update_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UpdateBackupRequest() + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - client.set_iam_policy() + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_backup(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + +def test_update_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc + request = {} + client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_set_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest +async def test_update_backup_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_backup + ] = mock_rpc + + request = {} + await client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + await client.update_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateBackupRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6797,47 +9574,56 @@ async def test_set_iam_policy_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) - response = await client.set_iam_policy(request) + response = await client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.UpdateBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.asyncio -async def test_set_iam_policy_async_from_dict(): - await test_set_iam_policy_async(request_type=dict) +async def test_update_backup_async_from_dict(): + await test_update_backup_async(request_type=dict) -def test_set_iam_policy_field_headers(): - client = BigtableTableAdminClient( +def test_update_backup_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.UpdateBackupRequest() - request.resource = "resource_value" + request.backup.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request) + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = table.Backup() + client.update_backup(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 @@ -6848,26 +9634,26 @@ def test_set_iam_policy_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "backup.name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_set_iam_policy_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_update_backup_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.UpdateBackupRequest() - request.resource = "resource_value" + request.backup.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.set_iam_policy(request) + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + await client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -6878,117 +9664,109 @@ async def test_set_iam_policy_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "backup.name=name_value", ) in kw["metadata"] -def test_set_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.set_iam_policy( - request={ - "resource": "resource_value", - "policy": policy_pb2.Policy(version=774), - "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]), - } - ) - call.assert_called() - - -def test_set_iam_policy_flattened(): - client = BigtableTableAdminClient( +def test_update_backup_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() + call.return_value = table.Backup() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.set_iam_policy( - resource="resource_value", + client.update_backup( + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].backup + mock_val = table.Backup(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val -def test_set_iam_policy_flattened_error(): - client = BigtableTableAdminClient( +def test_update_backup_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", + client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio -async def test_set_iam_policy_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_update_backup_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() + call.return_value = table.Backup() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.set_iam_policy( - resource="resource_value", + response = await client.update_backup( + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].backup + mock_val = table.Backup(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio -async def test_set_iam_policy_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_update_backup_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
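
# The update_backup tests pass an explicit FieldMask; the mask, not the
# message, decides which stored fields the server may overwrite. A small
# self-contained example of building and reading one:
from google.protobuf import field_mask_pb2

mask = field_mask_pb2.FieldMask(paths=["paths_value"])
# Only the listed paths are read from `backup` during the update; unlisted
# fields on the stored resource are left untouched.
assert list(mask.paths) == ["paths_value"]
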
with pytest.raises(ValueError): - await client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", + await client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, + bigtable_table_admin.DeleteBackupRequest, dict, ], ) -def test_test_iam_permissions(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test_delete_backup(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -6998,101 +9776,176 @@ def test_test_iam_permissions(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - response = client.test_iam_permissions(request) + call.return_value = None + response = client.delete_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_table_admin.DeleteBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] + assert response is None -def test_test_iam_permissions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( +def test_delete_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteBackupRequest( + name="name_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - client.test_iam_permissions() + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_backup(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == bigtable_table_admin.DeleteBackupRequest( + name="name_value", + ) -@pytest.mark.asyncio -async def test_test_iam_permissions_async( - transport: str = "grpc_asyncio", - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +def test_delete_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) + # Ensure method has been cached + assert client._transport.delete_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - response = await client.test_iam_permissions(request) + client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc + request = {} + client.delete_backup(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + assert mock_rpc.call_count == 1 - # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] + client.delete_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_test_iam_permissions_async_from_dict(): - await test_test_iam_permissions_async(request_type=dict) +async def test_delete_backup_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -def test_test_iam_permissions_field_headers(): - client = BigtableTableAdminClient( + # Ensure method has been cached + assert ( + client._client._transport.delete_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_backup + ] = mock_rpc + + request = {} + await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteBackupRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DeleteBackupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_backup_async_from_dict(): + await test_delete_backup_async(request_type=dict) + + +def test_delete_backup_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_table_admin.DeleteBackupRequest() - request.resource = "resource_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. 
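
# Why the async tests wrap return values in FakeUnaryUnaryCall: the async
# client awaits the stub call, so the mocked call must hand back an
# awaitable. A minimal stand-in with the same shape as the helper used above
# (a sketch, not the api_core implementation):
import asyncio


class _FakeUnaryUnaryCallSketch:
    def __init__(self, response=None):
        self._response = response

    def __await__(self):
        async def _produce():
            return self._response

        return _produce().__await__()


async def _demo():
    assert await _FakeUnaryUnaryCallSketch("ok") == "ok"


asyncio.run(_demo())
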
- with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request) + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = None + client.delete_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -7103,30 +9956,26 @@ def test_test_iam_permissions_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_test_iam_permissions_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_delete_backup_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_table_admin.DeleteBackupRequest() - request.resource = "resource_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse() - ) - await client.test_iam_permissions(request) + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -7137,573 +9986,13047 @@ async def test_test_iam_permissions_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "name=name_value", ) in kw["metadata"] -def test_test_iam_permissions_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - response = client.test_iam_permissions( - request={ - "resource": "resource_value", - "permissions": ["permissions_value"], - } - ) - call.assert_called() - - -def test_test_iam_permissions_flattened(): - client = BigtableTableAdminClient( +def test_delete_backup_flattened(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.test_iam_permissions( - resource="resource_value", - permissions=["permissions_value"], + client.delete_backup( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - arg = args[0].permissions - mock_val = ["permissions_value"] + arg = args[0].name + mock_val = "name_value" assert arg == mock_val -def test_test_iam_permissions_flattened_error(): - client = BigtableTableAdminClient( +def test_delete_backup_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], + client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", ) @pytest.mark.asyncio -async def test_test_iam_permissions_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_delete_backup_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + call.return_value = None - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse() - ) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.test_iam_permissions( - resource="resource_value", - permissions=["permissions_value"], + response = await client.delete_backup( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" - assert arg == mock_val - arg = args[0].permissions - mock_val = ["permissions_value"] + arg = args[0].name + mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio -async def test_test_iam_permissions_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), +async def test_delete_backup_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - await client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], + await client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CreateTableRequest, + bigtable_table_admin.ListBackupsRequest, dict, ], ) -def test_create_table_rest(request_type): - client = BigtableTableAdminClient( +def test_list_backups(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", ) + response = client.list_backups(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_table(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListBackupsRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + assert isinstance(response, pagers.ListBackupsPager) + assert response.next_page_token == "next_page_token_value" -def test_create_table_rest_required_fields( - request_type=bigtable_table_admin.CreateTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport +def test_list_backups_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
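
# Hedged sketch of what ListBackupsPager automates for the tests above:
# re-issue the RPC with each next_page_token until the token comes back
# empty. fetch_page stands in for the wrapped list_backups call, and plain
# dicts stand in for the response messages.
def iterate_all(fetch_page):
    token = ""
    while True:
        page = fetch_page(page_token=token)
        yield from page["backups"]
        token = page["next_page_token"]
        if not token:
            return


_pages = {
    "": {"backups": [1, 2], "next_page_token": "t1"},
    "t1": {"backups": [3], "next_page_token": ""},
}
assert list(iterate_all(lambda page_token: _pages[page_token])) == [1, 2, 3]
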
+ client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListBackupsRequest( + parent="parent_value", + filter="filter_value", + order_by="order_by_value", + page_token="page_token_value", ) - # verify fields with default values are dropped + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_backups(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListBackupsRequest( + parent="parent_value", + filter="filter_value", + order_by="order_by_value", + page_token="page_token_value", + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with default values are now present +def test_list_backups_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Ensure method has been cached + assert client._transport.list_backups in client._transport._wrapped_methods - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc + request = {} + client.list_backups(request) - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # Designate an appropriate value for the returned response. - return_value = gba_table.Table() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + client.list_backups(request) - response_value = Response() - response_value.status_code = 200 + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.asyncio +async def test_list_backups_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response = client.create_table(request) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Ensure method has been cached + assert ( + client._client._transport.list_backups + in client._client._transport._wrapped_methods + ) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_backups + ] = mock_rpc -def test_create_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + request = {} + await client.list_backups(request) - unset_fields = transport.create_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - "table", - ) - ) - ) + # Establish that the underlying gRPC stub method was called. 
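
# The removed REST tests mock path_template.transcode; roughly, transcoding
# matches a request against an http rule and splits it into URI, verb, body,
# and leftover query params. transcode_sketch is illustrative only, and the
# URI template here is an assumption, not the service's actual http rule.
def transcode_sketch(request: dict) -> dict:
    parent = request.pop("parent")
    return {
        "method": "post",
        "uri": f"/v2/{parent}/tables",
        "body": request.pop("table", None),
        "query_params": request,  # fields not bound to the URI or body
    }


_result = transcode_sketch({"parent": "projects/p/instances/i", "table_id": "t"})
assert _result["uri"] == "/v2/projects/p/instances/i/tables"
assert _result["query_params"] == {"table_id": "t"}
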
+ assert mock_rpc.call_count == 1 + await client.list_backups(request) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_backups_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListBackupsRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CreateTableRequest.pb( - bigtable_table_admin.CreateTableRequest() + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + response = await client.list_backups(request) - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gba_table.Table.to_json(gba_table.Table()) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListBackupsRequest() + assert args[0] == request - request = bigtable_table_admin.CreateTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gba_table.Table() + # Establish that the response is the type that we expect. 
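+    # (The async client wraps the raw ListBackupsResponse in a pager so
+    # that additional pages can be fetched lazily.)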
+ assert isinstance(response, pagers.ListBackupsAsyncPager) + assert response.next_page_token == "next_page_token_value" - client.create_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() +@pytest.mark.asyncio +async def test_list_backups_async_from_dict(): + await test_list_backups_async(request_type=dict) -def test_create_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CreateTableRequest -): - client = BigtableTableAdminClient( +def test_list_backups_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListBackupsRequest() - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_table(request) + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = bigtable_table_admin.ListBackupsResponse() + client.list_backups(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_create_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_backups_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = gba_table.Table() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListBackupsRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} + request.parent = "parent_value" - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), + # Mock the actual call within the gRPC stub, and fake the request. 
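+    # The routing value is expected to surface as gRPC metadata, e.g.
+    # ("x-goog-request-params", "parent=parent_value"), which is asserted
+    # below.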
+ with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse() ) - mock_args.update(sample_request) + await client.list_backups(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] - client.create_table(**mock_args) + +def test_list_backups_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_backups( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -def test_create_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( +def test_list_backups_flattened_error(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_table( - bigtable_table_admin.CreateTableRequest(), + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), ) -def test_create_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" +@pytest.mark.asyncio +async def test_list_backups_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. 
-@pytest.mark.parametrize(
-    "request_type",
-    [
-        bigtable_table_admin.CreateTableFromSnapshotRequest,
-        dict,
-    ],
-)
-def test_create_table_from_snapshot_rest(request_type):
-    client = BigtableTableAdminClient(
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable_table_admin.ListBackupsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_backups(
+            parent="parent_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = "parent_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_list_backups_flattened_error_async():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_backups(
+            bigtable_table_admin.ListBackupsRequest(),
+            parent="parent_value",
+        )
+
+
+def test_list_backups_pager(transport_name: str = "grpc"):
+    client = BaseBigtableTableAdminClient(
         credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                    table.Backup(),
+                ],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[],
+                next_page_token="def",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                ],
+                next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        expected_metadata = ()
+        retry = retries.Retry()
+        timeout = 5
+        expected_metadata = tuple(expected_metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_backups(request={}, retry=retry, timeout=timeout)
+
+        assert pager._metadata == expected_metadata
+        assert pager._retry == retry
+        assert pager._timeout == timeout
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, table.Backup) for i in results)
+
+
+def test_list_backups_pages(transport_name: str = "grpc"):
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_backups), "__call__") as call:
+        # Set the response to a series of pages.
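+        # The last page carries no next_page_token, which stops the pager;
+        # the trailing RuntimeError sentinel should therefore never be raised.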
+ call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + RuntimeError, + ) + pages = list(client.list_backups(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_backups_async_pager(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_backups( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table.Backup) for i in responses) + + +@pytest.mark.asyncio +async def test_list_backups_async_pages(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
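+        # Same page sequence as the synchronous test; the pages are consumed
+        # with `async for` below.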
+ call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_backups(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.RestoreTableRequest, + dict, + ], +) +def test__restore_table(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client._restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.RestoreTableRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test__restore_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.RestoreTableRequest( + parent="parent_value", + table_id="table_id_value", + backup="backup_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._restore_table(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.RestoreTableRequest( + parent="parent_value", + table_id="table_id_value", + backup="backup_value", + ) + + +def test__restore_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.restore_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc + request = {} + client._restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client._restore_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test__restore_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.restore_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.restore_table + ] = mock_rpc + + request = {} + await client._restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client._restore_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test__restore_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.RestoreTableRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
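+    # (test__restore_table_async_from_dict below re-runs this test with the
+    # request supplied as a plain dict.)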
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client._restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.RestoreTableRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test__restore_table_async_from_dict(): + await test__restore_table_async(request_type=dict) + + +def test__restore_table_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.RestoreTableRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client._restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test__restore_table_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.RestoreTableRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client._restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.CopyBackupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_copy_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.copy_backup(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + +def test_copy_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.copy_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc + request = {} + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
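+        # (copy_backup returns a long-running operation, hence the extra
+        # operations client.)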
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.copy_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_copy_backup_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.copy_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.copy_backup + ] = mock_rpc + + request = {} + await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.copy_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_copy_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.CopyBackupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_copy_backup_async_from_dict(): + await test_copy_backup_async(request_type=dict) + + +def test_copy_backup_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
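+    # copy_backup is a long-running method, so the stub below is faked with
+    # a bare operations_pb2.Operation rather than a response message.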
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_copy_backup_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_copy_backup_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +def test_copy_backup_flattened_error(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.copy_backup), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.copy_backup(
+            parent="parent_value",
+            backup_id="backup_id_value",
+            source_backup="source_backup_value",
+            expire_time=timestamp_pb2.Timestamp(seconds=751),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = "parent_value"
+        assert arg == mock_val
+        arg = args[0].backup_id
+        mock_val = "backup_id_value"
+        assert arg == mock_val
+        arg = args[0].source_backup
+        mock_val = "source_backup_value"
+        assert arg == mock_val
+        assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp(
+            seconds=751
+        )
+
+
+@pytest.mark.asyncio
+async def test_copy_backup_flattened_error_async():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.copy_backup(
+            bigtable_table_admin.CopyBackupRequest(),
+            parent="parent_value",
+            backup_id="backup_id_value",
+            source_backup="source_backup_value",
+            expire_time=timestamp_pb2.Timestamp(seconds=751),
+        )
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        iam_policy_pb2.GetIamPolicyRequest,
+        dict,
+    ],
+)
+def test_get_iam_policy(request_type, transport: str = "grpc"):
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(
+            version=774,
+            etag=b"etag_blob",
+        )
+        response = client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = iam_policy_pb2.GetIamPolicyRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+    assert response.version == 774
+    assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_non_empty_request_with_auto_populated_field():
+    # This test is a coverage failsafe to make sure that UUID4 fields are
+    # automatically populated, according to AIP-4235, with non-empty requests.
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc",
+    )
+
+    # Populate all string fields in the request which are not UUID4
+    # since we want to check that UUID4 are populated automatically
+    # if they meet the requirements of AIP 4235.
+    request = iam_policy_pb2.GetIamPolicyRequest(
+        resource="resource_value",
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_iam_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + +def test_get_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_iam_policy + ] = mock_rpc + + request = {} + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
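+        # (FakeUnaryUnaryCall makes the canned Policy awaitable, matching
+        # the behavior of a real async gRPC call.)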
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict_foreign(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
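+        # (An empty Policy is sufficient; only the outgoing request is
+        # inspected by this test.)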
+        call.return_value = policy_pb2.Policy()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_iam_policy(
+            resource="resource_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = "resource_value"
+        assert arg == mock_val
+
+
+def test_get_iam_policy_flattened_error():
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_iam_policy(
+            iam_policy_pb2.GetIamPolicyRequest(),
+            resource="resource_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_async():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_iam_policy(
+            resource="resource_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = "resource_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_error_async():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_iam_policy(
+            iam_policy_pb2.GetIamPolicyRequest(),
+            resource="resource_value",
+        )
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        iam_policy_pb2.SetIamPolicyRequest,
+        dict,
+    ],
+)
+def test_set_iam_policy(request_type, transport: str = "grpc"):
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(
+            version=774,
+            etag=b"etag_blob",
+        )
+        response = client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        request = iam_policy_pb2.SetIamPolicyRequest()
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.set_iam_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + +def test_set_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.set_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.set_iam_policy + ] = mock_rpc + + request = {} + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + await client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
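+    # (Each mock_calls entry is a (name, args, kwargs) tuple; the gRPC
+    # metadata travels in the kwargs.)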
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "resource=resource_value",
+    ) in kw["metadata"]
+
+
+def test_set_iam_policy_from_dict_foreign():
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy()
+        response = client.set_iam_policy(
+            request={
+                "resource": "resource_value",
+                "policy": policy_pb2.Policy(version=774),
+                "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]),
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy_flattened():
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_iam_policy(
+            resource="resource_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = "resource_value"
+        assert arg == mock_val
+
+
+def test_set_iam_policy_flattened_error():
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_iam_policy(
+            iam_policy_pb2.SetIamPolicyRequest(),
+            resource="resource_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_async():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_iam_policy(
+            resource="resource_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = "resource_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_error_async():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.test_iam_permissions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + + +def test_test_iam_permissions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. 
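+        # (The call is routed through the mock that replaced the cached entry
+        # in _wrapped_methods, so wrap_method is not invoked again.)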
+ assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.test_iam_permissions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async( + transport: str = "grpc_asyncio", + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
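+    # (Patching "__call__" on the transport's multicallable intercepts the RPC
+    # at the stub layer, beneath the GAPIC method wrappers.)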
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict_foreign(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_test_iam_permissions_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.test_iam_permissions( + resource="resource_value", + permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val + + +def test_test_iam_permissions_flattened_error(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.test_iam_permissions( + resource="resource_value", + permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateSchemaBundleRequest, + dict, + ], +) +def test_create_schema_bundle(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.CreateSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_schema_bundle_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
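+    # (Only the non-UUID4 string fields are set explicitly; any UUID4-annotated
+    # request-ID field is left unset so auto-population can be observed.)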
+ request = bigtable_table_admin.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_schema_bundle(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + ) + + +def test_create_schema_bundle_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_schema_bundle + ] = mock_rpc + request = {} + client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_schema_bundle_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_schema_bundle + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_schema_bundle + ] = mock_rpc + + request = {} + await client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_schema_bundle_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateSchemaBundleRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.CreateSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_schema_bundle_async_from_dict(): + await test_create_schema_bundle_async(request_type=dict) + + +def test_create_schema_bundle_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateSchemaBundleRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_schema_bundle_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateSchemaBundleRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
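+    # (Header routing is unchanged for long-running methods; the Operation
+    # returned by the stub plays no part in the metadata check below.)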
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_schema_bundle_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_schema_bundle( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].schema_bundle_id + mock_val = "schema_bundle_id_value" + assert arg == mock_val + arg = args[0].schema_bundle + mock_val = table.SchemaBundle(name="name_value") + assert arg == mock_val + + +def test_create_schema_bundle_flattened_error(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_schema_bundle( + bigtable_table_admin.CreateSchemaBundleRequest(), + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_schema_bundle_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_schema_bundle( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].schema_bundle_id + mock_val = "schema_bundle_id_value" + assert arg == mock_val + arg = args[0].schema_bundle + mock_val = table.SchemaBundle(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_schema_bundle_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_schema_bundle( + bigtable_table_admin.CreateSchemaBundleRequest(), + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateSchemaBundleRequest, + dict, + ], +) +def test_update_schema_bundle(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.UpdateSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_schema_bundle_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UpdateSchemaBundleRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_schema_bundle(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateSchemaBundleRequest() + + +def test_update_schema_bundle_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_schema_bundle + ] = mock_rpc + request = {} + client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_schema_bundle_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_schema_bundle + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_schema_bundle + ] = mock_rpc + + request = {} + await client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_schema_bundle_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateSchemaBundleRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.UpdateSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_schema_bundle_async_from_dict(): + await test_update_schema_bundle_async(request_type=dict) + + +def test_update_schema_bundle_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
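+    # (For update RPCs the routing field is nested, so the expected header
+    # key below is "schema_bundle.name" rather than a top-level field.)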
+ request = bigtable_table_admin.UpdateSchemaBundleRequest() + + request.schema_bundle.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "schema_bundle.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_schema_bundle_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateSchemaBundleRequest() + + request.schema_bundle.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "schema_bundle.name=name_value", + ) in kw["metadata"] + + +def test_update_schema_bundle_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_schema_bundle( + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].schema_bundle + mock_val = table.SchemaBundle(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_schema_bundle_flattened_error(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_schema_bundle( + bigtable_table_admin.UpdateSchemaBundleRequest(), + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_schema_bundle_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_schema_bundle( + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].schema_bundle + mock_val = table.SchemaBundle(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_schema_bundle_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_schema_bundle( + bigtable_table_admin.UpdateSchemaBundleRequest(), + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetSchemaBundleRequest, + dict, + ], +) +def test_get_schema_bundle(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.SchemaBundle( + name="name_value", + etag="etag_value", + ) + response = client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.GetSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, table.SchemaBundle) + assert response.name == "name_value" + assert response.etag == "etag_value" + + +def test_get_schema_bundle_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
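+    # (AIP-4235, referenced above, covers client-side auto-population of
+    # UUID4-annotated request-ID fields that the caller leaves unset.)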
+ client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetSchemaBundleRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_schema_bundle(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetSchemaBundleRequest( + name="name_value", + ) + + +def test_get_schema_bundle_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_schema_bundle in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_schema_bundle + ] = mock_rpc + request = {} + client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_schema_bundle_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_schema_bundle + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_schema_bundle + ] = mock_rpc + + request = {} + await client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. 
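+        # (An AsyncMock stands in for the wrapped method so the awaited call
+        # is recorded without touching a real channel.)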
+ assert mock_rpc.call_count == 1 + + await client.get_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_schema_bundle_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GetSchemaBundleRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.SchemaBundle( + name="name_value", + etag="etag_value", + ) + ) + response = await client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.GetSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, table.SchemaBundle) + assert response.name == "name_value" + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_schema_bundle_async_from_dict(): + await test_get_schema_bundle_async(request_type=dict) + + +def test_get_schema_bundle_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSchemaBundleRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + call.return_value = table.SchemaBundle() + client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_schema_bundle_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSchemaBundleRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.SchemaBundle()) + await client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_schema_bundle_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.SchemaBundle() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_schema_bundle( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_schema_bundle_flattened_error(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_schema_bundle( + bigtable_table_admin.GetSchemaBundleRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_schema_bundle_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.SchemaBundle() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.SchemaBundle()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_schema_bundle( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_schema_bundle_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_schema_bundle( + bigtable_table_admin.GetSchemaBundleRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListSchemaBundlesRequest, + dict, + ], +) +def test_list_schema_bundles(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
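+        # (A non-empty next_page_token signals that further pages would be
+        # available to the pager.)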
+ call.return_value = bigtable_table_admin.ListSchemaBundlesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListSchemaBundlesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSchemaBundlesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_schema_bundles_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListSchemaBundlesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_schema_bundles(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListSchemaBundlesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_schema_bundles_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_schema_bundles in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_schema_bundles + ] = mock_rpc + request = {} + client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_schema_bundles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_schema_bundles_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_schema_bundles + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_schema_bundles + ] = mock_rpc + + request = {} + await client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_schema_bundles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_schema_bundles_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListSchemaBundlesRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSchemaBundlesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListSchemaBundlesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSchemaBundlesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_schema_bundles_async_from_dict(): + await test_list_schema_bundles_async(request_type=dict) + + +def test_list_schema_bundles_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListSchemaBundlesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_schema_bundles_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListSchemaBundlesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSchemaBundlesResponse() + ) + await client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_schema_bundles_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_schema_bundles( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_schema_bundles_flattened_error(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_schema_bundles( + bigtable_table_admin.ListSchemaBundlesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_schema_bundles_flattened_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSchemaBundlesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.list_schema_bundles( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_schema_bundles_flattened_error_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_schema_bundles( + bigtable_table_admin.ListSchemaBundlesRequest(), + parent="parent_value", + ) + + +def test_list_schema_bundles_pager(transport_name: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + RuntimeError, + ) + + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_schema_bundles(request={}, retry=retry, timeout=timeout) + + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.SchemaBundle) for i in results) + + +def test_list_schema_bundles_pages(transport_name: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Set the response to a series of pages. 
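+        # mock's side_effect pops one item per RPC: three pages with tokens
+        # "abc", "def", "ghi", then a final page with an empty token. The
+        # trailing RuntimeError would only be raised if the pager kept
+        # fetching past that last page, so it doubles as an over-fetch guard.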
+ call.side_effect = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + RuntimeError, + ) + pages = list(client.list_schema_bundles(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_schema_bundles_async_pager(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_schema_bundles( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table.SchemaBundle) for i in responses) + + +@pytest.mark.asyncio +async def test_list_schema_bundles_async_pages(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_schema_bundles(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteSchemaBundleRequest, + dict, + ], +) +def test_delete_schema_bundle(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DeleteSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_schema_bundle_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteSchemaBundleRequest( + name="name_value", + etag="etag_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_schema_bundle(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteSchemaBundleRequest( + name="name_value", + etag="etag_value", + ) + + +def test_delete_schema_bundle_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_schema_bundle + ] = mock_rpc + request = {} + client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_schema_bundle_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_schema_bundle + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_schema_bundle + ] = mock_rpc + + request = {} + await client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_schema_bundle_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteSchemaBundleRequest, +): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
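+        # FakeUnaryUnaryCall wraps the value in an awaitable call object, a
+        # minimal stand-in for a real grpc.aio call, so that
+        # `await client.delete_schema_bundle(...)` can resolve it.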
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DeleteSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_schema_bundle_async_from_dict(): + await test_delete_schema_bundle_async(request_type=dict) + + +def test_delete_schema_bundle_field_headers(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteSchemaBundleRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + call.return_value = None + client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_schema_bundle_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteSchemaBundleRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_schema_bundle_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_schema_bundle( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_schema_bundle_flattened_error(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
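+    # A method accepts either a fully-formed request proto or flattened
+    # keyword arguments that get copied into a fresh request; passing both
+    # is ambiguous, so the client raises ValueError before any RPC is made.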
+    with pytest.raises(ValueError):
+        client.delete_schema_bundle(
+            bigtable_table_admin.DeleteSchemaBundleRequest(),
+            name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_flattened_async():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.delete_schema_bundle), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_schema_bundle(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = "name_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_delete_schema_bundle_flattened_error_async():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_schema_bundle(
+            bigtable_table_admin.DeleteSchemaBundleRequest(),
+            name="name_value",
+        )
+
+
+def test_create_table_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = BaseBigtableTableAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.create_table in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.create_table] = mock_rpc
+
+        request = {}
+        client.create_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
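+        # The cached wrapper was swapped for mock_rpc above, so every client
+        # call should land on the mock directly. Roughly, the invariant
+        # checked here and below is:
+        #     first call  -> mock_rpc.call_count == 1
+        #     second call -> mock_rpc.call_count == 2, wrapper_fn untouched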
+ assert mock_rpc.call_count == 1 + + client.create_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_table_rest_required_fields( + request_type=bigtable_table_admin.CreateTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gba_table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
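+            # path_template.transcode normally matches the request against
+            # the method's http rule to produce a URI, verb, body, and query
+            # params; returning this canned result (with a placeholder URI)
+            # lets the test exercise required-field handling without real
+            # resource names.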
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + "table", + ) + ) + ) + + +def test_create_table_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gba_table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, + args[1], + ) + + +def test_create_table_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_table( + bigtable_table_admin.CreateTableRequest(), + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + + +def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_table_from_snapshot + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_table_from_snapshot + ] = mock_rpc + + request = {} + client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_table_from_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_table_from_snapshot_rest_required_fields( + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request_init["source_snapshot"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + jsonified_request["sourceSnapshot"] = "source_snapshot_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + assert "sourceSnapshot" in jsonified_request + assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
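+    # create_table_from_snapshot is a long-running operation, so the REST
+    # layer returns an operations_pb2.Operation rather than the final table;
+    # "operations/spam" is just a placeholder operation name for the fake.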
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_table_from_snapshot(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_table_from_snapshot_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + "sourceSnapshot", + ) + ) + ) + + +def test_create_table_from_snapshot_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_table_from_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" + % client.transport._host, + args[1], + ) + + +def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_table_from_snapshot( + bigtable_table_admin.CreateTableFromSnapshotRequest(), + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + +def test_list_tables_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_tables in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc + + request = {} + client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_tables(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_tables_rest_required_fields( + request_type=bigtable_table_admin.ListTablesRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_tables._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_tables._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + "view", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                "uri": "v1/sample_method",
+                "method": "get",
+                "query_params": pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode("UTF-8")
+            req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+            response = client.list_tables(request)
+
+            expected_params = [("$alt", "json;enum-encoding=int")]
+            actual_params = req.call_args.kwargs["params"]
+            assert expected_params == actual_params
+
+
+def test_list_tables_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials
+    )
+
+    unset_fields = transport.list_tables._get_unset_required_fields({})
+    assert set(unset_fields) == (
+        set(
+            (
+                "pageSize",
+                "pageToken",
+                "view",
+            )
+        )
+        & set(("parent",))
+    )
+
+
+def test_list_tables_rest_flattened():
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), "request") as req:
+        # Designate an appropriate value for the returned response.
+        return_value = bigtable_table_admin.ListTablesResponse()
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"parent": "projects/sample1/instances/sample2"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            parent="parent_value",
+        )
+        mock_args.update(sample_request)
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        # Convert return value to protobuf type
+        return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value._content = json_return_value.encode("UTF-8")
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+
+        client.list_tables(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate(
+            "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host,
+            args[1],
+        )
+
+
+def test_list_tables_rest_flattened_error(transport: str = "rest"):
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_tables(
+            bigtable_table_admin.ListTablesRequest(),
+            parent="parent_value",
+        )
+
+
+def test_list_tables_rest_pager(transport: str = "rest"):
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, "request") as req:
+        # Set the response as a series of pages
+        response = (
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                    table.Table(),
+                    table.Table(),
+                ],
+                next_page_token="abc",
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[],
+                next_page_token="def",
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                ],
+                next_page_token="ghi",
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                    table.Table(),
+                ],
+            ),
+        )
+        # Two responses for two calls
+        response = response + response
+
+        # Wrap the values into proper Response objs
+        response = tuple(
+            bigtable_table_admin.ListTablesResponse.to_json(x) for x in response
+        )
+        return_values = tuple(Response() for _ in response)
+        for return_val, response_val in zip(return_values, response):
+            return_val._content = response_val.encode("UTF-8")
+            return_val.status_code = 200
+        req.side_effect = return_values
+
+        sample_request = {"parent": "projects/sample1/instances/sample2"}
+
+        pager = client.list_tables(request=sample_request)
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, table.Table) for i in results)
+
+        pages = list(client.list_tables(request=sample_request).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_get_table_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = BaseBigtableTableAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.get_table in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.get_table] = mock_rpc
+
+        request = {}
+        client.get_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert mock_rpc.call_count == 1 + + client.get_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_table_rest_required_fields( + request_type=bigtable_table_admin.GetTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) + + +def test_get_table_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
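+    # Patching the transport session's request() intercepts the outgoing
+    # HTTP call itself, so the assertion at the end can validate the rendered
+    # URI against the path template without any network traffic.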
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, + args[1], + ) + + +def test_get_table_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_table( + bigtable_table_admin.GetTableRequest(), + name="name_value", + ) + + +def test_update_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_table] = mock_rpc + + request = {} + client.update_table(request) + + # Establish that the underlying gRPC stub method was called. 
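+        # (update_table is an operation method: the first call may also wrap
+        # the operations-polling methods, which is why wrapper_fn is reset
+        # below before checking that no new wrapper is created.)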
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_table_rest_required_fields( + request_type=bigtable_table_admin.UpdateTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set( + ( + "table", + "updateMask", + ) + ) + ) + + +def test_update_table_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table.name=projects/*/instances/*/tables/*}" + % client.transport._host, + args[1], + ) + + +def test_update_table_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc + + request = {} + client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_table_rest_required_fields( + request_type=bigtable_table_admin.DeleteTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_delete_table_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, + args[1], + ) + + +def test_delete_table_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_table( + bigtable_table_admin.DeleteTableRequest(), + name="name_value", + ) + + +def test_undelete_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.undelete_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc + + request = {} + client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.undelete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_undelete_table_rest_required_fields( + request_type=bigtable_table_admin.UndeleteTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).undelete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).undelete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.undelete_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_undelete_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.undelete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_undelete_table_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.undelete_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" + % client.transport._host, + args[1], + ) + + +def test_undelete_table_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
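+ # (Editor's note: mixing the two call styles is ambiguous, so the client
+ # is expected to raise ValueError before any HTTP request is attempted.)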
+ with pytest.raises(ValueError): + client.undelete_table( + bigtable_table_admin.UndeleteTableRequest(), + name="name_value", + ) + + +def test_create_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_authorized_view + ] = mock_rpc + + request = {} + client.create_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["authorized_view_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "authorizedViewId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_authorized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "authorizedViewId" in jsonified_request + assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["authorizedViewId"] = "authorized_view_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("authorized_view_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "authorizedViewId" in jsonified_request + assert jsonified_request["authorizedViewId"] == "authorized_view_id_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
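+ # (Editor's note: create_authorized_view is a long-running method, so the
+ # faked response below is an operations_pb2.Operation rather than the
+ # resource itself; real callers would poll the returned operation.)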
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_authorized_view(request) + + expected_params = [ + ( + "authorizedViewId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_authorized_view_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("authorizedViewId",)) + & set( + ( + "parent", + "authorizedViewId", + "authorizedView", + ) + ) + ) + + +def test_create_authorized_view_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_authorized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
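+ # (Editor's note: path_template.validate checks that the URI built from
+ # the flattened arguments matches the http-rule pattern below; args[1] is
+ # presumably the URL positional argument passed to Session.request.)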
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" + % client.transport._host, + args[1], + ) + + +def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_authorized_view( + bigtable_table_admin.CreateAuthorizedViewRequest(), + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", + ) + + +def test_list_authorized_views_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_authorized_views + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_authorized_views + ] = mock_rpc + + request = {} + client.list_authorized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_authorized_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_authorized_views_rest_required_fields( + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_authorized_views._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_authorized_views._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
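+ # (Editor's note: the set difference below asserts that any still-unset
+ # fields are drawn only from the listed query parameters; path and body
+ # fields should already be populated at this point.)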
+ assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + "view", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_authorized_views(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_authorized_views_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_authorized_views._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + "view", + ) + ) + & set(("parent",)) + ) + + +def test_list_authorized_views_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
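+ # (Editor's note: the REST transport speaks JSON, so the proto response is
+ # converted via .pb() and json_format.MessageToJson before being attached
+ # to the mocked requests.Response object.)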
+ return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_authorized_views(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" + % client.transport._host, + args[1], + ) + + +def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_authorized_views( + bigtable_table_admin.ListAuthorizedViewsRequest(), + parent="parent_value", + ) + + +def test_list_authorized_views_rest_pager(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
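+ # (Editor's note: the commented-out transcode mock below is what the TODO
+ # refers to; this pager test instead feeds Session.request a sequence of
+ # serialized page responses via side_effect, so each pager iteration
+ # consumes the next faked HTTP response.)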
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + pager = client.list_authorized_views(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.AuthorizedView) for i in results) + + pages = list(client.list_authorized_views(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_authorized_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_authorized_view + ] = mock_rpc + + request = {} + client.get_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. 
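+ # (Editor's note: because the wrapped method was cached at client creation,
+ # the second invocation below should reuse it; wrapper_fn.call_count
+ # staying at zero is the evidence that no new wrapper was built.)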
+ assert mock_rpc.call_count == 1 + + client.get_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.GetAuthorizedViewRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_authorized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.AuthorizedView() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
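+ # (Editor's note: this method is transcoded as a GET, so the stubbed
+ # transcode result below carries no "body" entry, unlike the POST/PATCH
+ # cases elsewhere in this file.)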
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_authorized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_authorized_view_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) + + +def test_get_authorized_view_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.AuthorizedView() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_authorized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_authorized_view( + bigtable_table_admin.GetAuthorizedViewRequest(), + name="name_value", + ) + + +def test_update_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_authorized_view + ] = mock_rpc + + request = {} + client.update_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_authorized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_authorized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_authorized_view_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set(("authorizedView",)) + ) + + +def test_update_authorized_view_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + } + + # get truthy value for each flattened field + mock_args = dict( + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_authorized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_authorized_view( + bigtable_table_admin.UpdateAuthorizedViewRequest(), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_authorized_view + ] = mock_rpc + + request = {} + client.delete_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_authorized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("etag",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
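+ # (Editor's note: delete_authorized_view returns Empty, so the faked HTTP
+ # body a few lines below is an empty string rather than a serialized
+ # message.)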
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_authorized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_authorized_view_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) + + +def test_delete_authorized_view_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_authorized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_authorized_view( + bigtable_table_admin.DeleteAuthorizedViewRequest(), + name="name_value", + ) + + +def test_modify_column_families_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.modify_column_families + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.modify_column_families + ] = mock_rpc + + request = {} + client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.modify_column_families(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_modify_column_families_rest_required_fields( + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).modify_column_families._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).modify_column_families._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.modify_column_families(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_modify_column_families_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.modify_column_families._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "modifications", + ) + ) + ) + + +def test_modify_column_families_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.modify_column_families(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" + % client.transport._host, + args[1], + ) + + +def test_modify_column_families_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + +def test_drop_row_range_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.drop_row_range in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc + + request = {} + client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.drop_row_range(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_drop_row_range_rest_required_fields( + request_type=bigtable_table_admin.DropRowRangeRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).drop_row_range._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).drop_row_range._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.drop_row_range(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_drop_row_range_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.drop_row_range._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.generate_consistency_token + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_consistency_token + ] = mock_rpc + + request = {} + client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.generate_consistency_token(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_generate_consistency_token_rest_required_fields( + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_consistency_token._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_consistency_token._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.generate_consistency_token(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_consistency_token_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_generate_consistency_token_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.generate_consistency_token(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" + % client.transport._host, + args[1], + ) + + +def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", + ) + + +def test_check_consistency_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.check_consistency in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.check_consistency + ] = mock_rpc + + request = {} + client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.check_consistency(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_check_consistency_rest_required_fields( + request_type=bigtable_table_admin.CheckConsistencyRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["consistency_token"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).check_consistency._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["consistencyToken"] = "consistency_token_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).check_consistency._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "consistencyToken" in jsonified_request + assert jsonified_request["consistencyToken"] == "consistency_token_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.CheckConsistencyResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.check_consistency(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_check_consistency_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.check_consistency._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "consistencyToken", + ) + ) + ) + + +def test_check_consistency_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.CheckConsistencyResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + consistency_token="consistency_token_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.check_consistency(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" + % client.transport._host, + args[1], + ) + + +def test_check_consistency_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), + name="name_value", + consistency_token="consistency_token_value", + ) + + +def test_snapshot_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.snapshot_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc + + request = {} + client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.snapshot_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_snapshot_table_rest_required_fields( + request_type=bigtable_table_admin.SnapshotTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["cluster"] = "" + request_init["snapshot_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).snapshot_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["cluster"] = "cluster_value" + jsonified_request["snapshotId"] = "snapshot_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).snapshot_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "cluster" in jsonified_request + assert jsonified_request["cluster"] == "cluster_value" + assert "snapshotId" in jsonified_request + assert jsonified_request["snapshotId"] == "snapshot_id_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
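+ # Session and Response are assumed to come from the requests library
+ # (imported at the top of this module); patching Session.request keeps the
+ # test offline and returns the canned Response constructed below.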
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.snapshot_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_snapshot_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.snapshot_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "cluster", + "snapshotId", + ) + ) + ) + + +def test_snapshot_table_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.snapshot_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" + % client.transport._host, + args[1], + ) + + +def test_snapshot_table_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + +def test_get_snapshot_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_snapshot in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc + + request = {} + client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_snapshot_rest_required_fields( + request_type=bigtable_table_admin.GetSnapshotRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Snapshot() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
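+ # GET methods carry no request body, so the transcode_result fabricated
+ # here (unlike the POST/PATCH variants above) deliberately omits the
+ # "body" key.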
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_snapshot(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_snapshot_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_snapshot_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Snapshot() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + % client.transport._host, + args[1], + ) + + +def test_get_snapshot_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), + name="name_value", + ) + + +def test_list_snapshots_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_snapshots in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc + + request = {} + client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_snapshots(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_snapshots_rest_required_fields( + request_type=bigtable_table_admin.ListSnapshotsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_snapshots._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_snapshots._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_snapshots(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_snapshots_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_snapshots._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_snapshots_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_snapshots(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" + % client.transport._host, + args[1], + ) + + +def test_list_snapshots_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), + parent="parent_value", + ) + + +def test_list_snapshots_rest_pager(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
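+ # Pagination below is driven entirely by next_page_token: four pages
+ # holding 3 + 0 + 1 + 2 snapshots are queued twice via req.side_effect
+ # (once for the pager iteration, once for the .pages walk), so each client
+ # call consumes four HTTP responses and yields 6 items in total.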
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], + next_page_token="def", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_snapshots(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Snapshot) for i in results) + + pages = list(client.list_snapshots(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_snapshot_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_snapshot in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc + + request = {} + client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. 
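+ # The first call resolves delete_snapshot through the _wrapped_methods
+ # cache (swapped for mock_rpc above); the assertions below confirm the
+ # cached entry is reused instead of being re-wrapped on the second call.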
+ assert mock_rpc.call_count == 1 + + client.delete_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_snapshot_rest_required_fields( + request_type=bigtable_table_admin.DeleteSnapshotRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_snapshot(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_snapshot_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_delete_snapshot_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
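+ # DeleteSnapshot returns an empty message, so there is no protobuf payload
+ # to encode: the canned HTTP body below is the empty string and the client
+ # helper is expected to return None.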
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", + ) + + +def test_create_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc + + request = {} + client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_backup_rest_required_fields( + request_type=bigtable_table_admin.CreateBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "backupId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == request_init["backup_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_backup._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("backup_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
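+ # backup_id is a required query parameter rather than a path or body field,
+ # so unlike the other required-field tests its default value survives into
+ # the expected_params assertion below as ("backupId", "").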
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_backup(request) + + expected_params = [ + ( + "backupId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("backupId",)) + & set( + ( + "parent", + "backupId", + "backup", + ) + ) + ) + + +def test_create_backup_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + % client.transport._host, + args[1], + ) + + +def test_create_backup_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_backup( + bigtable_table_admin.CreateBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + + +def test_get_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc + + request = {} + client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_backup_rest_required_fields( + request_type=bigtable_table_admin.GetBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Backup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_backup._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_backup_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) + + +def test_get_backup_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_backup( + bigtable_table_admin.GetBackupRequest(), + name="name_value", + ) + + +def test_update_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc + + request = {} + client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_backup_rest_required_fields( + request_type=bigtable_table_admin.UpdateBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_backup._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Backup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
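+ # UpdateBackup has no client-settable required path fields (request_init
+ # above is empty); only updateMask comes back from
+ # _get_unset_required_fields, as the unset-required-fields test below also
+ # checks.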
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "backup", + "updateMask", + ) + ) + ) + + +def test_update_backup_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() + + # get arguments that satisfy an http rule for this method + sample_request = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } + + # get truthy value for each flattened field + mock_args = dict( + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) + + +def test_update_backup_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc + + request = {} + client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_backup_rest_required_fields( + request_type=bigtable_table_admin.DeleteBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_backup._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_delete_backup_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_backup_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", + ) + + +def test_list_backups_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_backups in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc + + request = {} + client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_backups(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_backups_rest_required_fields( + request_type=bigtable_table_admin.ListBackupsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_backups._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_backups._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_backups(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_backups_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_backups._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_backups_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_backups(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + % client.transport._host, + args[1], + ) + + +def test_list_backups_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), + parent="parent_value", + ) + + +def test_list_backups_rest_pager(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_backups(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Backup) for i in results) + + pages = list(client.list_backups(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test__restore_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.restore_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc + + request = {} + client._restore_table(request) + + # Establish that the underlying gRPC stub method was called. 
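+ # A count of 1 below confirms the call was dispatched through the
+ # mock that replaced the cached wrapped method above.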
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client._restore_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test__restore_table_rest_required_fields( + request_type=bigtable_table_admin.RestoreTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restore_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restore_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
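+ # restore_table transcodes to a POST, so a request body is attached
+ # to the stubbed transcode result in addition to query_params.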
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client._restore_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test__restore_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.restore_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + ) + ) + ) + + +def test_copy_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.copy_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc + + request = {} + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.copy_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_copy_backup_rest_required_fields( + request_type=bigtable_table_admin.CopyBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request_init["source_backup"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + jsonified_request["sourceBackup"] = "source_backup_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" + assert "sourceBackup" in jsonified_request + assert jsonified_request["sourceBackup"] == "source_backup_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.copy_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_copy_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.copy_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "backupId", + "sourceBackup", + "expireTime", + ) + ) + ) + + +def test_copy_backup_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.copy_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" + % client.transport._host, + args[1], + ) + + +def test_copy_backup_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
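+ # The two inputs are mutually exclusive, so the client is expected to
+ # raise ValueError rather than send a request.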
+ with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +def test_get_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
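+ # IAM policy requests are plain protobuf messages rather than
+ # proto-plus types, so the request is used directly here instead of
+ # going through request_type.pb().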
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("resource",))) + + +def test_get_iam_policy_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_set_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_set_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.SetIamPolicyRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.set_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_set_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.set_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "policy", + ) + ) + ) + + +def test_set_iam_policy_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_test_iam_permissions_rest_required_fields( + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request_init["permissions"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + jsonified_request["permissions"] = "permissions_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == "permissions_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.test_iam_permissions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_test_iam_permissions_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "permissions", + ) + ) + ) + + +def test_test_iam_permissions_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + permissions=["permissions_value"], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" + % client.transport._host, + args[1], + ) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_create_schema_bundle_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_schema_bundle + ] = mock_rpc + + request = {} + client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_schema_bundle_rest_required_fields( + request_type=bigtable_table_admin.CreateSchemaBundleRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["schema_bundle_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "schemaBundleId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "schemaBundleId" in jsonified_request + assert jsonified_request["schemaBundleId"] == request_init["schema_bundle_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["schemaBundleId"] = "schema_bundle_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_schema_bundle._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("schema_bundle_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "schemaBundleId" in jsonified_request + assert jsonified_request["schemaBundleId"] == "schema_bundle_id_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
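+ # create_schema_bundle is a long-running operation, so a raw
+ # operations_pb2.Operation stands in for the response payload.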
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_schema_bundle(request) + + expected_params = [ + ( + "schemaBundleId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_schema_bundle_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_schema_bundle._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("schemaBundleId",)) + & set( + ( + "parent", + "schemaBundleId", + "schemaBundle", + ) + ) + ) + + +def test_create_schema_bundle_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_schema_bundle(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles" + % client.transport._host, + args[1], + ) + + +def test_create_schema_bundle_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_schema_bundle( + bigtable_table_admin.CreateSchemaBundleRequest(), + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), + ) + + +def test_update_schema_bundle_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_schema_bundle + ] = mock_rpc + + request = {} + client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_schema_bundle_rest_required_fields( + request_type=bigtable_table_admin.UpdateSchemaBundleRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_schema_bundle._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
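+ # UpdateSchemaBundle has no required path params; only the
+ # schema_bundle body field is required, carried on the PATCH
+ # transcode result below.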
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_schema_bundle(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_schema_bundle_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_schema_bundle._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set(("schemaBundle",)) + ) + + +def test_update_schema_bundle_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "schema_bundle": { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + } + + # get truthy value for each flattened field + mock_args = dict( + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_schema_bundle(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{schema_bundle.name=projects/*/instances/*/tables/*/schemaBundles/*}" + % client.transport._host, + args[1], + ) + + +def test_update_schema_bundle_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_schema_bundle( + bigtable_table_admin.UpdateSchemaBundleRequest(), + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_get_schema_bundle_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_schema_bundle in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_schema_bundle + ] = mock_rpc + + request = {} + client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_schema_bundle_rest_required_fields( + request_type=bigtable_table_admin.GetSchemaBundleRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.SchemaBundle() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
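+ # GET methods carry no request body; the canned SchemaBundle response
+ # is converted back to protobuf below before being JSON-encoded, as
+ # the real transport would.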
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.SchemaBundle.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_schema_bundle(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_schema_bundle_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_schema_bundle._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_schema_bundle_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.SchemaBundle() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.SchemaBundle.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_schema_bundle(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}" + % client.transport._host, + args[1], + ) + + +def test_get_schema_bundle_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_schema_bundle( + bigtable_table_admin.GetSchemaBundleRequest(), + name="name_value", + ) + + +def test_list_schema_bundles_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_schema_bundles in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_schema_bundles + ] = mock_rpc + + request = {} + client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_schema_bundles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_schema_bundles_rest_required_fields( + request_type=bigtable_table_admin.ListSchemaBundlesRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_schema_bundles._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_schema_bundles._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSchemaBundlesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
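+ # page_size and page_token are optional query params; only parent is
+ # required, matching the unset-required-fields checks above.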
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_schema_bundles(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_schema_bundles_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_schema_bundles._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_schema_bundles_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSchemaBundlesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_schema_bundles(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles" + % client.transport._host, + args[1], + ) + + +def test_list_schema_bundles_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_schema_bundles( + bigtable_table_admin.ListSchemaBundlesRequest(), + parent="parent_value", + ) + + +def test_list_schema_bundles_rest_pager(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
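+    # The pager test feeds a series of fake page responses through
+    # req.side_effect and checks that iteration transparently walks every page.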
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListSchemaBundlesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + pager = client.list_schema_bundles(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.SchemaBundle) for i in results) + + pages = list(client.list_schema_bundles(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_schema_bundle_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_schema_bundle + ] = mock_rpc + + request = {} + client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. 
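+        # The first invocation should have gone through the cached wrapper
+        # exactly once; the second call below must reuse it without re-wrapping.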
+ assert mock_rpc.call_count == 1 + + client.delete_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_schema_bundle_rest_required_fields( + request_type=bigtable_table_admin.DeleteSchemaBundleRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_schema_bundle._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("etag",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_schema_bundle(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_schema_bundle_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_schema_bundle._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) + + +def test_delete_schema_bundle_rest_flattened(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_schema_bundle(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_schema_bundle_rest_flattened_error(transport: str = "rest"): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_schema_bundle( + bigtable_table_admin.DeleteSchemaBundleRequest(), + name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BaseBigtableTableAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BaseBigtableTableAdminClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BaseBigtableTableAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BaseBigtableTableAdminClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
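+    # (When a transport instance is injected, credentials and client options
+    # must not be supplied alongside it; see test_credentials_transport_error
+    # above.)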
+ transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BaseBigtableTableAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + transports.BigtableTableAdminRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = BaseBigtableTableAdminClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value = gba_table.Table() + client.create_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_from_snapshot_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_table_from_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_tables_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value = bigtable_table_admin.ListTablesResponse() + client.list_tables(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListTablesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_table_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + call.return_value = table.Table() + client.get_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_table_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_table_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + call.return_value = None + client.delete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_undelete_table_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.undelete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UndeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
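+# With request=None, the client builds a default (empty) request message and
+# passes it to the transport stub unchanged, which is what the
+# args[0] == request_msg assertion at the end of each test verifies.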
+def test_create_authorized_view_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_authorized_views_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + client.list_authorized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_authorized_view_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + call.return_value = table.AuthorizedView() + client.get_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_authorized_view_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_authorized_view_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value = None + client.delete_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_modify_column_families_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = table.Table() + client.modify_column_families(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_drop_row_range_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value = None + client.drop_row_range(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DropRowRangeRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_consistency_token_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + client.generate_consistency_token(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_check_consistency_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + client.check_consistency(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CheckConsistencyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_snapshot_table_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.snapshot_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.SnapshotTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_snapshot_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = table.Snapshot() + client.get_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_snapshots_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + client.list_snapshots(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSnapshotsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_snapshot_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = None + client.delete_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_backup_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_backup(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_backup_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = table.Backup() + client.get_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_backup_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = table.Backup() + client.update_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_backup_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = None + client.delete_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_backups_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = bigtable_table_admin.ListBackupsResponse() + client.list_backups(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test__restore_table_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client._restore_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.RestoreTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_copy_backup_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.copy_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CopyBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_create_schema_bundle_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_schema_bundle_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_schema_bundle_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + call.return_value = table.SchemaBundle() + client.get_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_schema_bundles_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + client.list_schema_bundles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSchemaBundlesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_schema_bundle_empty_call_grpc(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + call.return_value = None + client.delete_schema_bundle(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSchemaBundleRequest() + + assert args[0] == request_msg + + +def test_transport_kind_grpc_asyncio(): + transport = BaseBigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_table_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.create_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_table_from_snapshot_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_table_from_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_tables_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_tables(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListTablesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_table_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.get_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_table_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_table_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_undelete_table_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.undelete_table(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UndeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_authorized_view_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_authorized_views_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_authorized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_authorized_view_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, + ) + ) + await client.get_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_authorized_view_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_authorized_view_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_modify_column_families_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.modify_column_families(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_drop_row_range_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.drop_row_range(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DropRowRangeRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
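+# The grpc_asyncio variants wrap each fake response in
+# grpc_helpers_async.FakeUnaryUnaryCall so that the mocked stub returns an
+# awaitable, mirroring a real grpc.aio unary-unary call.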
+@pytest.mark.asyncio +async def test_generate_consistency_token_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + ) + await client.generate_consistency_token(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_check_consistency_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + ) + await client.check_consistency(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CheckConsistencyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_snapshot_table_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.snapshot_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.SnapshotTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_snapshot_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + ) + await client.get_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_snapshots_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_snapshots(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSnapshotsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_snapshot_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_backup_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_backup_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + ) + await client.get_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_backup_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + ) + await client.update_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_backup_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_backups_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_backups(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test__restore_table_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client._restore_table(request=None) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.RestoreTableRequest() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_table_from_snapshot(request) - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_copy_backup_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.copy_backup(request=None) -def test_create_table_from_snapshot_rest_required_fields( - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, -): - transport_class = transports.BigtableTableAdminRestTransport + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CopyBackupRequest() - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request_init["source_snapshot"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) ) + await client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", ) - # verify fields with default values are dropped + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy(request=None) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() - # verify required fields with default values are now present + assert args[0] == request_msg - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" - jsonified_request["sourceSnapshot"] = "source_snapshot_value" - unset_fields = transport_class( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_schema_bundle_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_schema_bundle_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_schema_bundle_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.SchemaBundle( + name="name_value", + etag="etag_value", + ) + ) + await client.get_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_schema_bundles_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSchemaBundlesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_schema_bundles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSchemaBundlesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_delete_schema_bundle_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSchemaBundleRequest() + + assert args[0] == request_msg + + +def test_transport_kind_rest(): + transport = BaseBigtableTableAdminClient.get_transport_class("rest")( credentials=ga_credentials.AnonymousCredentials() - ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + ) + assert transport.kind == "rest" - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" - assert "sourceSnapshot" in jsonified_request - assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_create_table_rest_bad_request( + request_type=bigtable_table_admin.CreateTableRequest, +): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. 
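+ # A single `with` statement both patches requests.Session.request and
+ # asserts that the staged 400 response surfaces to the caller as a
+ # core_exceptions.BadRequest.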
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_table(request) - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateTableRequest, + dict, + ], +) +def test_create_table_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - response = client.create_table_from_snapshot(request) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 -def test_create_table_from_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_table(request) - unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - "sourceSnapshot", - ) - ) - ) + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gba_table.Table) + assert response.name == "name_value" + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_from_snapshot_rest_interceptors(null_interceptor): +def test_create_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" + transports.BigtableTableAdminRestInterceptor, "post_create_table" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" + transports.BigtableTableAdminRestInterceptor, "post_create_table_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( - bigtable_table_admin.CreateTableFromSnapshotRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CreateTableRequest.pb( + bigtable_table_admin.CreateTableRequest() ) transcode.return_value = { "method": "post", @@ -7712,22 +23035,22 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = gba_table.Table.to_json(gba_table.Table()) + req.return_value.content = return_value - request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request = bigtable_table_admin.CreateTableRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = gba_table.Table() + post_with_metadata.return_value = gba_table.Table(), metadata - client.create_table_from_snapshot( + client.create_table( request, metadata=[ ("key", "val"), @@ -7737,17 +23060,15 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_table_from_snapshot_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) @@ -7757,89 +23078,26 @@ def 
test_create_table_from_snapshot_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table_from_snapshot(request) -def test_create_table_from_snapshot_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.create_table_from_snapshot(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" - % client.transport._host, - args[1], - ) - - -def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", - ) - - -def test_create_table_from_snapshot_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListTablesRequest, + bigtable_table_admin.CreateTableFromSnapshotRequest, dict, ], ) -def test_list_tables_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_create_table_from_snapshot_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -7849,150 +23107,50 @@ def test_list_tables_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_tables(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_table_from_snapshot(request) # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTablesPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_tables_rest_required_fields( - request_type=bigtable_table_admin.ListTablesRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_tables._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_tables._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - "view", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.list_tables(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_tables_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_tables._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - "view", - ) - ) - & set(("parent",)) - ) + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_tables_rest_interceptors(null_interceptor): +def test_create_table_from_snapshot_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_tables" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_tables" + transports.BigtableTableAdminRestInterceptor, + "post_create_table_from_snapshot_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.ListTablesRequest.pb( - bigtable_table_admin.ListTablesRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( + bigtable_table_admin.CreateTableFromSnapshotRequest() ) transcode.return_value = { "method": "post", @@ -8001,22 +23159,22 @@ def test_list_tables_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListTablesResponse.to_json( - bigtable_table_admin.ListTablesResponse() - ) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_table_admin.ListTablesRequest() + request = bigtable_table_admin.CreateTableFromSnapshotRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = 
bigtable_table_admin.ListTablesResponse() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.list_tables( + client.create_table_from_snapshot( request, metadata=[ ("key", "val"), @@ -8026,16 +23184,15 @@ def test_list_tables_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_tables_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.ListTablesRequest + request_type=bigtable_table_admin.ListTablesRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) @@ -8045,260 +23202,188 @@ def test_list_tables_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_tables(request) -def test_list_tables_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.list_tables(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, - args[1], - ) - - -def test_list_tables_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent="parent_value", - ) - - -def test_list_tables_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token="def", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListTablesResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2"} - - pager = client.list_tables(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Table) for i in results) - - pages = list(client.list_tables(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetTableRequest, + bigtable_table_admin.ListTablesRequest, dict, ], ) -def test_get_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_list_tables_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, + return_value = bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_table(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_tables(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - + assert isinstance(response, pagers.ListTablesPager) + assert response.next_page_token == "next_page_token_value" -def test_get_table_rest_required_fields( - request_type=bigtable_table_admin.GetTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_tables_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_tables" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_tables_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_tables" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ListTablesRequest.pb( + bigtable_table_admin.ListTablesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.ListTablesResponse.to_json( + bigtable_table_admin.ListTablesResponse() + ) + req.return_value.content = return_value - # verify required fields with default values are now present + request = bigtable_table_admin.ListTablesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListTablesResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListTablesResponse(), + metadata, + ) - jsonified_request["name"] = "name_value" + client.list_tables( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("view",)) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = table.Table() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_table(request) - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetTableRequest, + dict, + ], +) +def test_get_table_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - response = client.get_table(request) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
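+ # The fake response body is produced by serializing the proto-plus
+ # message to JSON, mirroring what the REST transport parses off the wire.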
+ return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 -def test_get_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_table(request) - unset_fields = transport.get_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(("view",)) & set(("name",))) + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -8309,7 +23394,8 @@ def test_get_table_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( @@ -8317,10 +23403,13 @@ def test_get_table_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_get_table" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_table_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_get_table" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.GetTableRequest.pb( bigtable_table_admin.GetTableRequest() ) @@ -8331,10 +23420,11 @@ def test_get_table_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Table.to_json(table.Table()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = table.Table.to_json(table.Table()) + req.return_value.content = return_value request = bigtable_table_admin.GetTableRequest() metadata = [ @@ -8343,6 +23433,7 @@ def test_get_table_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = table.Table() + post_with_metadata.return_value = table.Table(), metadata client.get_table( request, @@ -8354,18 +23445,19 @@ def test_get_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.GetTableRequest +def test_update_table_rest_bad_request( + request_type=bigtable_table_admin.UpdateTableRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + 
credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -8373,72 +23465,14 @@ def test_get_table_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_table(request) - - -def test_get_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.get_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, - args[1], - ) - - -def test_get_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_table( - bigtable_table_admin.GetTableRequest(), - name="name_value", - ) - - -def test_get_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_table(request) @pytest.mark.parametrize( @@ -8448,10 +23482,9 @@ def test_get_table_rest_error(): dict, ], ) -def test_update_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_update_table_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -8475,7 +23508,122 @@ def test_update_table_rest(request_type): }, "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, + "automated_backup_policy": {"retention_period": {}, "frequency": {}}, + "tiered_storage_config": {"infrequent_access": {"include_if_older_than": {}}}, + "row_key_schema": { + "fields": [ + { + "field_name": "field_name_value", + "type_": { + "bytes_type": {"encoding": {"raw": {}}}, + "string_type": {"encoding": {"utf8_raw": {}, "utf8_bytes": {}}}, + "int64_type": { + "encoding": { + "big_endian_bytes": {"bytes_type": {}}, + "ordered_code_bytes": {}, + } + }, + "float32_type": {}, + "float64_type": {}, + "bool_type": {}, + "timestamp_type": {"encoding": {"unix_micros_int64": {}}}, + "date_type": {}, + "aggregate_type": { + "input_type": {}, + "state_type": {}, + "sum": {}, + "hllpp_unique_count": {}, + "max_": {}, + "min_": {}, + }, + "struct_type": {}, + "array_type": {"element_type": {}}, + "map_type": {"key_type": {}, "value_type": {}}, + "proto_type": { + "schema_bundle_id": "schema_bundle_id_value", + "message_name": "message_name_value", + }, + "enum_type": { + "schema_bundle_id": "schema_bundle_id_value", + "enum_name": "enum_name_value", + }, + }, + } + ], + "encoding": { + "singleton": {}, + "delimited_bytes": {"delimiter": b"delimiter_blob"}, + "ordered_code_bytes": {}, + }, + }, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
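+ # A proto-plus message exposes its subfields via field.message.meta.fields,
+ # while a raw protobuf message exposes them via field.message.DESCRIPTOR.fields;
+ # both cases are handled below.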
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["table"][field])): + del request_init["table"][field][i][subfield] + else: + del request_init["table"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -8484,132 +23632,161 @@ def test_update_table_rest(request_type): return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_table(request) # Establish that the response is the type that we expect. 
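
# Editor's note (sketch): the pruning loop above deletes request_init subfields
# that the installed protobuf/proto-plus runtime does not know about, so the
# generated sample stays constructible across dependency versions. A minimal,
# dependency-free model of the same idea; `known` and `sample` are illustrative
# names, not part of the generated test:
known = {("restore_info", "backup_info")}  # (field, subfield) pairs known at runtime
sample = {"restore_info": {"backup_info": {}, "added_in_newer_proto": {}}}
for field, value in list(sample.items()):
    if isinstance(value, dict):
        for subfield in list(value):
            if (field, subfield) not in known:
                del value[subfield]  # same effect as the generated cleanup above
assert sample == {"restore_info": {"backup_info": {}}}
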
- assert response.operation.name == "operations/spam" + json_return_value = json_format.MessageToJson(return_value) -def test_update_table_rest_required_fields( - request_type=bigtable_table_admin.UpdateTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BaseBigtableTableAdminClient(transport=transport) - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_table_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.UpdateTableRequest.pb( + bigtable_table_admin.UpdateTableRequest() ) - ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - # verify fields with default values are dropped + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + request = bigtable_table_admin.UpdateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - # verify required fields with default values are now present + client.update_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_delete_table_rest_bad_request( + request_type=bigtable_table_admin.DeleteTableRequest, +): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_table(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.update_table(request) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteTableRequest, + dict, + ], +) +def test_delete_table_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
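
# Editor's note (sketch): a recurring mechanical change in this diff is that
# requests.Response() is replaced by a configured mock.Mock(). The transport
# only reads a handful of attributes, so a mock exposing status_code, headers,
# content, and json() is a sufficient stand-in. Attribute names mirror the
# tests above; nothing beyond that is implied:
from unittest import mock

fake_response = mock.Mock()
fake_response.status_code = 200
fake_response.headers = {"header-1": "value-1", "header-2": "value-2"}
fake_response.content = b'{"name": "operations/spam"}'
fake_response.json = mock.Mock(return_value={"name": "operations/spam"})

assert fake_response.status_code == 200
assert fake_response.json()["name"] == "operations/spam"
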
+ return_value = None -def test_update_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_table(request) - unset_fields = transport.update_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "table", - "updateMask", - ) - ) - ) + # Establish that the response is the type that we expect. + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_table_rest_interceptors(null_interceptor): +def test_delete_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_table" + transports.BigtableTableAdminRestInterceptor, "pre_delete_table" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UpdateTableRequest.pb( - bigtable_table_admin.UpdateTableRequest() + pb_message = bigtable_table_admin.DeleteTableRequest.pb( + bigtable_table_admin.DeleteTableRequest() ) transcode.return_value = { "method": "post", @@ -8618,22 +23795,18 @@ def test_update_table_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = bigtable_table_admin.UpdateTableRequest() + request = bigtable_table_admin.DeleteTableRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - client.update_table( + client.delete_table( request, metadata=[ ("key", "val"), @@ -8642,39 +23815,16 @@ def test_update_table_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() -def test_update_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.UpdateTableRequest +def test_undelete_table_rest_bad_request( + request_type=bigtable_table_admin.UndeleteTableRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = { - "table": {"name": 
"projects/sample1/instances/sample2/tables/sample3"} - } - request_init["table"] = { - "name": "projects/sample1/instances/sample2/tables/sample3", - "cluster_states": {}, - "column_families": {}, - "granularity": 1, - "restore_info": { - "source_type": 1, - "backup_info": { - "backup": "backup_value", - "start_time": {"seconds": 751, "nanos": 543}, - "end_time": {}, - "source_table": "source_table_value", - "source_backup": "source_backup_value", - }, - }, - "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, - "deletion_protection": True, - } + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -8682,89 +23832,26 @@ def test_update_table_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_table(request) - - -def test_update_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } - - # get truthy value for each flattened field - mock_args = dict( - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.update_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table.name=projects/*/instances/*/tables/*}" - % client.transport._host, - args[1], - ) - - -def test_update_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_update_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.undelete_table(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteTableRequest, + bigtable_table_admin.UndeleteTableRequest, dict, ], ) -def test_delete_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_undelete_table_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -8774,124 +23861,252 @@ def test_delete_table_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_table(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.undelete_table(request) # Establish that the response is the type that we expect. 
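
# Editor's note (sketch): long-running methods such as undelete_table mock the
# HTTP body as the JSON form of a longrunning Operation, which the client then
# parses back into a typed message. The round trip, assuming protobuf and
# googleapis-common-protos are available (both are test dependencies here):
from google.longrunning import operations_pb2
from google.protobuf import json_format

op = operations_pb2.Operation(name="operations/spam")
payload = json_format.MessageToJson(op)  # what the mocked response.content carries
parsed = json_format.Parse(payload, operations_pb2.Operation())
assert parsed.name == "operations/spam"
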
- assert response is None - + json_return_value = json_format.MessageToJson(return_value) -def test_delete_table_rest_required_fields( - request_type=bigtable_table_admin.DeleteTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_undelete_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_undelete_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_undelete_table_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.UndeleteTableRequest.pb( + bigtable_table_admin.UndeleteTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - # verify required fields with default values are now present + request = bigtable_table_admin.UndeleteTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - jsonified_request["name"] = "name_value" + client.undelete_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_create_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, +): + client = BaseBigtableTableAdminClient( + 
credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_authorized_view(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateAuthorizedViewRequest, + dict, + ], +) +def test_create_authorized_view_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - response_value = Response() - response_value.status_code = 200 - json_return_value = "" + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init["authorized_view"] = { + "name": "name_value", + "subset_view": { + "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], + "family_subsets": {}, + }, + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[ + "authorized_view" + ] - response = client.delete_table(request) + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["authorized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["authorized_view"][field])): + del request_init["authorized_view"][field][i][subfield] + else: + del request_init["authorized_view"][field][subfield] + request = request_type(**request_init) -def test_delete_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") - unset_fields = transport.delete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_authorized_view(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_table_rest_interceptors(null_interceptor): +def test_create_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_table" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_create_authorized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteTableRequest.pb( - bigtable_table_admin.DeleteTableRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb( + bigtable_table_admin.CreateAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -8900,18 +24115,22 @@ def test_delete_table_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_table_admin.DeleteTableRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.delete_table( + client.create_authorized_view( request, metadata=[ ("key", "val"), @@ -8920,18 +24139,18 @@ def test_delete_table_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_delete_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DeleteTableRequest +def test_list_authorized_views_rest_bad_request( + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
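
# Editor's note (sketch): the *_rest_bad_request tests configure the mock with
# status_code 400 plus a json() callable because google-api-core builds typed
# exceptions from exactly those attributes. A minimal reproduction, assuming
# google-api-core is installed (it is a dependency of this package);
# from_http_response is the public helper, though whether the generated
# transport calls it directly is an implementation detail:
from unittest import mock
from google.api_core import exceptions as core_exceptions

bad = mock.Mock()
bad.status_code = 400
bad.json = mock.Mock(return_value={})  # empty error payload
bad.request = mock.Mock()              # .method / .url are read for the message

exc = core_exceptions.from_http_response(bad)
assert isinstance(exc, core_exceptions.BadRequest)
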
@@ -8939,217 +24158,224 @@ def test_delete_table_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_table(request) - - -def test_delete_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 + response_value = mock.Mock() json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() req.return_value = response_value - - client.delete_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, - args[1], - ) - - -def test_delete_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_table( - bigtable_table_admin.DeleteTableRequest(), - name="name_value", - ) - - -def test_delete_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_authorized_views(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UndeleteTableRequest, + bigtable_table_admin.ListAuthorizedViewsRequest, dict, ], ) -def test_undelete_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_list_authorized_views_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.undelete_table(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_authorized_views(request) # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - + assert isinstance(response, pagers.ListAuthorizedViewsPager) + assert response.next_page_token == "next_page_token_value" -def test_undelete_table_rest_required_fields( - request_type=bigtable_table_admin.UndeleteTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_authorized_views_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_list_authorized_views_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb( + bigtable_table_admin.ListAuthorizedViewsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json( + bigtable_table_admin.ListAuthorizedViewsResponse() + ) + req.return_value.content = return_value - # verify required fields with default values are now present + request = bigtable_table_admin.ListAuthorizedViewsRequest() + metadata = [ + ("key", "val"), + 
("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListAuthorizedViewsResponse(), + metadata, + ) - jsonified_request["name"] = "name_value" + client.list_authorized_views( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_get_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.GetAuthorizedViewRequest, +): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_authorized_view(request) - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetAuthorizedViewRequest, + dict, + ], +) +def test_get_authorized_view_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - response = client.undelete_table(request) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + request = request_type(**request_init) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, + ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 -def test_undelete_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_authorized_view(request) - unset_fields = transport.undelete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the response is the type that we expect. 
+ assert isinstance(response, table.AuthorizedView) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_undelete_table_rest_interceptors(null_interceptor): +def test_get_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_undelete_table" + transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" + transports.BigtableTableAdminRestInterceptor, + "post_get_authorized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.UndeleteTableRequest.pb( - bigtable_table_admin.UndeleteTableRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb( + bigtable_table_admin.GetAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -9158,22 +24384,22 @@ def test_undelete_table_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = table.AuthorizedView.to_json(table.AuthorizedView()) + req.return_value.content = return_value - request = bigtable_table_admin.UndeleteTableRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = table.AuthorizedView() + post_with_metadata.return_value = table.AuthorizedView(), metadata - client.undelete_table( + client.get_authorized_view( request, metadata=[ ("key", "val"), @@ -9183,18 +24409,21 @@ def test_undelete_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_undelete_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.UndeleteTableRequest +def test_update_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + 
"authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9202,234 +24431,280 @@ def test_undelete_table_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.undelete_table(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_authorized_view(request) -def test_undelete_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateAuthorizedViewRequest, + dict, + ], +) +def test_update_authorized_view_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = { + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + } + request_init["authorized_view"] = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "subset_view": { + "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], + "family_subsets": {}, + }, + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[ + "authorized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["authorized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["authorized_view"][field])): + del request_init["authorized_view"][field][i][subfield] + else: + del request_init["authorized_view"][field][subfield] + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. return_value = operations_pb2.Operation(name="operations/spam") - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_authorized_view(request) - client.undelete_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) -def test_undelete_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_authorized_view_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name="name_value", + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_update_authorized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb( + bigtable_table_admin.UpdateAuthorizedViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.UpdateAuthorizedViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.update_authorized_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + -def test_undelete_table_rest_error(): - client = BigtableTableAdminClient( +def test_delete_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, +): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_authorized_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ModifyColumnFamiliesRequest, + bigtable_table_admin.DeleteAuthorizedViewRequest, dict, ], ) -def test_modify_column_families_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_delete_authorized_view_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.modify_column_families(request) - - # Establish that the response is the type that we expect. 
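
# Editor's note (sketch): the *_rest_interceptors tests in this diff share one
# shape: patch the pre_*/post_*/post_*_with_metadata hooks on the interceptor,
# drive a single call, and assert each hook fired exactly once (methods that
# return no body, like the delete tests here, only exercise the pre hook). A
# stripped-down model of that flow; all class and method names below are
# illustrative only:
from unittest import mock

class FakeInterceptor:
    def pre_call(self, request, metadata):
        return request, metadata

    def post_call(self, response):
        return response

    def post_call_with_metadata(self, response, metadata):
        return response, metadata

def invoke(interceptor, request, metadata):
    request, metadata = interceptor.pre_call(request, metadata)
    response = "wire-response"  # stand-in for the mocked HTTP round trip
    response = interceptor.post_call(response)
    response, _ = interceptor.post_call_with_metadata(response, metadata)
    return response

interceptor = FakeInterceptor()
with mock.patch.object(
    FakeInterceptor, "pre_call", return_value=("req", [("key", "val")])
) as pre, mock.patch.object(
    FakeInterceptor, "post_call", return_value="resp"
) as post:
    pre.assert_not_called()
    post.assert_not_called()
    invoke(interceptor, "req", [("key", "val")])
    pre.assert_called_once()
    post.assert_called_once()
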
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_modify_column_families_rest_required_fields( - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).modify_column_families._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).modify_column_families._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Table() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
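
The removed flattened-call tests relied on `google.api_core.path_template` to check that the request hit the expected URI, and the interceptor tests mock `path_template.transcode` for the same machinery. Assuming the `expand`/`validate` helpers behave as their names suggest, a small sketch of what those templates do:

```python
from google.api_core import path_template

template = "v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies"

# expand() substitutes a named variable from keyword arguments.
uri = path_template.expand(
    "v2/{name=projects/*/instances/*/tables/*}",
    name="projects/p1/instances/i1/tables/t1",
)
assert uri == "v2/projects/p1/instances/i1/tables/t1"

# validate() checks a rendered path against the template's pattern,
# which is how the old flattened tests asserted on the request URI.
assert path_template.validate(
    template, "v2/projects/p1/instances/i1/tables/t1:modifyColumnFamilies"
)
```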
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.modify_column_families(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_modify_column_families_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + return_value = None - unset_fields = transport.modify_column_families._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "modifications", - ) - ) - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_authorized_view(request) + + # Establish that the response is the type that we expect. + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_modify_column_families_rest_interceptors(null_interceptor): +def test_delete_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" + transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( - bigtable_table_admin.ModifyColumnFamiliesRequest() + pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb( + bigtable_table_admin.DeleteAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -9438,20 +24713,18 @@ def test_modify_column_families_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Table.to_json(table.Table()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Table() - client.modify_column_families( + 
client.delete_authorized_view( request, metadata=[ ("key", "val"), @@ -9460,18 +24733,14 @@ def test_modify_column_families_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() def test_modify_column_families_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) @@ -9481,83 +24750,146 @@ def test_modify_column_families_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.modify_column_families(request) -def test_modify_column_families_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ModifyColumnFamiliesRequest, + dict, + ], +) +def test_modify_column_families_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( + return_value = table.Table( name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) - mock_args.update(sample_request) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.modify_column_families(**mock_args) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.modify_column_families(request) - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True -def test_modify_column_families_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_modify_column_families_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_modify_column_families_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( + bigtable_table_admin.ModifyColumnFamiliesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = table.Table.to_json(table.Table()) + req.return_value.content = return_value + + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + post_with_metadata.return_value = table.Table(), metadata - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), ], ) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + -def test_modify_column_families_rest_error(): - client = BigtableTableAdminClient( +def test_drop_row_range_rest_bad_request( + request_type=bigtable_table_admin.DropRowRangeRequest, +): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
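
Every `*_rest_bad_request` test fakes a 400 response on the mocked `Session.request` and expects the client to surface `core_exceptions.BadRequest`. The status-to-exception mapping lives in `google.api_core.exceptions`; a compact sketch of the behavior under test, with a stand-in `raise_for_status` helper (hypothetical, not the transport's real internals):

```python
import pytest
from google.api_core import exceptions as core_exceptions


def raise_for_status(status_code, message):
    # Rough mirror of what a REST transport does for a non-2xx response.
    if status_code >= 400:
        raise core_exceptions.from_http_status(status_code, message)


def test_bad_request_surfaces_as_exception():
    with pytest.raises(core_exceptions.BadRequest):
        raise_for_status(400, "malformed table name")
```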
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.drop_row_range(request) @pytest.mark.parametrize( @@ -9567,10 +24899,9 @@ def test_modify_column_families_rest_error(): dict, ], ) -def test_drop_row_range_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_drop_row_range_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -9583,122 +24914,162 @@ def test_drop_row_range_rest(request_type): return_value = None # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.drop_row_range(request) # Establish that the response is the type that we expect. assert response is None -def test_drop_row_range_rest_required_fields( - request_type=bigtable_table_admin.DropRowRangeRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_drop_row_range_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DropRowRangeRequest.pb( + bigtable_table_admin.DropRowRangeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).drop_row_range._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # verify required fields with default values are now present + request = bigtable_table_admin.DropRowRangeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata - 
jsonified_request["name"] = "name_value" + client.drop_row_range( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).drop_row_range._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_generate_consistency_token_rest_bad_request( + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.generate_consistency_token(request) - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GenerateConsistencyTokenRequest, + dict, + ], +) +def test_generate_consistency_token_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - response = client.drop_row_range(request) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 -def test_drop_row_range_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.generate_consistency_token(request) - unset_fields = transport.drop_row_range._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == "consistency_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_drop_row_range_rest_interceptors(null_interceptor): +def test_generate_consistency_token_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range" + transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_generate_consistency_token_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DropRowRangeRequest.pb( - bigtable_table_admin.DropRowRangeRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( + bigtable_table_admin.GenerateConsistencyTokenRequest() ) transcode.return_value = { "method": "post", @@ -9707,18 +25078,27 @@ def test_drop_row_range_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( + bigtable_table_admin.GenerateConsistencyTokenResponse() + ) + req.return_value.content = return_value - request = bigtable_table_admin.DropRowRangeRequest() + request = bigtable_table_admin.GenerateConsistencyTokenRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse(), + 
metadata, + ) - client.drop_row_range( + client.generate_consistency_token( request, metadata=[ ("key", "val"), @@ -9727,16 +25107,16 @@ def test_drop_row_range_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_drop_row_range_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DropRowRangeRequest +def test_check_consistency_rest_bad_request( + request_type=bigtable_table_admin.CheckConsistencyRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) @@ -9746,30 +25126,26 @@ def test_drop_row_range_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.drop_row_range(request) - - -def test_drop_row_range_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.check_consistency(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GenerateConsistencyTokenRequest, + bigtable_table_admin.CheckConsistencyRequest, dict, ], ) -def test_generate_consistency_token_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_check_consistency_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -9779,138 +25155,183 @@ def test_generate_consistency_token_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", + return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.generate_consistency_token(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.check_consistency(request) # Establish that the response is the type that we expect. 
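
Throughout these tests, `Type.pb(instance)` unwraps a proto-plus message to its raw protobuf form and `Type.to_json(instance)` produces the JSON string used as the faked body. Assuming the standard proto-plus classmethods, a sketch with a throwaway message (hypothetical, not part of the admin API):

```python
import proto


class ConsistencyProbe(proto.Message):
    consistency_token = proto.Field(proto.STRING, number=1)
    consistent = proto.Field(proto.BOOL, number=2)


msg = ConsistencyProbe(consistency_token="token_value", consistent=True)

raw = ConsistencyProbe.pb(msg)            # unwrap to the raw protobuf message
payload = ConsistencyProbe.to_json(msg)   # JSON string, like the faked bodies
roundtrip = ConsistencyProbe.from_json(payload)

assert roundtrip.consistent is True
```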
- assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" - + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True -def test_generate_consistency_token_rest_required_fields( - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_check_consistency_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_check_consistency" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_check_consistency_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( + bigtable_table_admin.CheckConsistencyRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).generate_consistency_token._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.CheckConsistencyResponse.to_json( + bigtable_table_admin.CheckConsistencyResponse() + ) + req.return_value.content = return_value - # verify required fields with default values are now present + request = bigtable_table_admin.CheckConsistencyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.CheckConsistencyResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.CheckConsistencyResponse(), + metadata, + ) - jsonified_request["name"] = "name_value" + client.check_consistency( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).generate_consistency_token._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - client = 
BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_snapshot_table_rest_bad_request( + request_type=bigtable_table_admin.SnapshotTableRequest, +): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.snapshot_table(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.generate_consistency_token(request) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.SnapshotTableRequest, + dict, + ], +) +def test_snapshot_table_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") -def test_generate_consistency_token_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.snapshot_table(request) - unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_consistency_token_rest_interceptors(null_interceptor): +def test_snapshot_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" + transports.BigtableTableAdminRestInterceptor, + "post_snapshot_table_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( - bigtable_table_admin.GenerateConsistencyTokenRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.SnapshotTableRequest.pb( + bigtable_table_admin.SnapshotTableRequest() ) transcode.return_value = { "method": "post", @@ -9919,24 +25340,22 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( - bigtable_table_admin.GenerateConsistencyTokenResponse() - ) - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request = bigtable_table_admin.SnapshotTableRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - 
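
Long-running methods (`snapshot_table`, `update_authorized_view`, `create_backup`, ...) return a `google.longrunning` Operation on the wire, so their tests fake exactly that payload and patch `operation.Operation._set_result_from_operation` to keep the test from polling. Building the same fake body:

```python
from google.longrunning import operations_pb2
from google.protobuf import json_format

op = operations_pb2.Operation(name="operations/spam")
body = json_format.MessageToJson(op)  # what the mocked response carries

parsed = json_format.Parse(body, operations_pb2.Operation())
assert parsed.name == "operations/spam"
```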
client.generate_consistency_token( + client.snapshot_table( request, metadata=[ ("key", "val"), @@ -9946,19 +25365,19 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_generate_consistency_token_rest_bad_request( - transport: str = "rest", - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +def test_get_snapshot_rest_bad_request( + request_type=bigtable_table_admin.GetSnapshotRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9966,239 +25385,218 @@ def test_generate_consistency_token_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.generate_consistency_token(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_snapshot(request) -def test_generate_consistency_token_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetSnapshotRequest, + dict, + ], +) +def test_get_snapshot_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( + return_value = table.Snapshot( name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", ) - mock_args.update(sample_request) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.generate_consistency_token(**mock_args) + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_snapshot(request) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + assert isinstance(response, table.Snapshot) + assert response.name == "name_value" + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == "description_value" -def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name="name_value", + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_snapshot_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.GetSnapshotRequest.pb( + bigtable_table_admin.GetSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = table.Snapshot.to_json(table.Snapshot()) + req.return_value.content = return_value + + request = bigtable_table_admin.GetSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Snapshot() + post_with_metadata.return_value = table.Snapshot(), metadata + + client.get_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + -def test_generate_consistency_token_rest_error(): - client = BigtableTableAdminClient( +def test_list_snapshots_rest_bad_request( + request_type=bigtable_table_admin.ListSnapshotsRequest, +): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_snapshots(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CheckConsistencyRequest, + bigtable_table_admin.ListSnapshotsRequest, dict, ], ) -def test_check_consistency_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_list_snapshots_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
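
`list_snapshots` hands back a `pagers.ListSnapshotsPager`, which keeps requesting pages while `next_page_token` is non-empty. A schematic of that loop with a hypothetical `fetch_page()` callable standing in for the API call:

```python
def iterate_all(fetch_page):
    """Yield items across pages, chasing next_page_token like the pager does."""
    token = ""
    while True:
        page = fetch_page(page_token=token)  # hypothetical API call
        yield from page["snapshots"]
        token = page.get("next_page_token", "")
        if not token:
            break


pages = {
    "": {"snapshots": ["s1", "s2"], "next_page_token": "t1"},
    "t1": {"snapshots": ["s3"], "next_page_token": ""},
}
assert list(iterate_all(lambda page_token: pages[page_token])) == ["s1", "s2", "s3"]
```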
with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, + return_value = bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", ) # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.check_consistency(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True - - -def test_check_consistency_rest_required_fields( - request_type=bigtable_table_admin.CheckConsistencyRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request_init["consistency_token"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).check_consistency._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - jsonified_request["consistencyToken"] = "consistency_token_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).check_consistency._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "consistencyToken" in jsonified_request - assert jsonified_request["consistencyToken"] == "consistency_token_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.check_consistency(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - + response_value = mock.Mock() + response_value.status_code = 200 -def test_check_consistency_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_snapshots(request) - unset_fields = transport.check_consistency._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "consistencyToken", - ) - ) - ) + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSnapshotsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_consistency_rest_interceptors(null_interceptor): +def test_list_snapshots_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_check_consistency" + transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" + transports.BigtableTableAdminRestInterceptor, + "post_list_snapshots_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( - bigtable_table_admin.CheckConsistencyRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( + bigtable_table_admin.ListSnapshotsRequest() ) transcode.return_value = { "method": "post", @@ -10207,24 +25605,27 @@ def test_check_consistency_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - 
bigtable_table_admin.CheckConsistencyResponse.to_json( - bigtable_table_admin.CheckConsistencyResponse() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.ListSnapshotsResponse.to_json( + bigtable_table_admin.ListSnapshotsResponse() ) + req.return_value.content = return_value - request = bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.ListSnapshotsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.CheckConsistencyResponse() + post.return_value = bigtable_table_admin.ListSnapshotsResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListSnapshotsResponse(), + metadata, + ) - client.check_consistency( + client.list_snapshots( request, metadata=[ ("key", "val"), @@ -10234,18 +25635,19 @@ def test_check_consistency_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_check_consistency_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CheckConsistencyRequest +def test_delete_snapshot_rest_bad_request( + request_type=bigtable_table_admin.DeleteSnapshotRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -10253,223 +25655,264 @@ def test_check_consistency_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.check_consistency(request) - - -def test_check_consistency_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.CheckConsistencyResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - consistency_token="consistency_token_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.check_consistency(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" - % client.transport._host, - args[1], - ) - - -def test_check_consistency_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), - name="name_value", - consistency_token="consistency_token_value", - ) - - -def test_check_consistency_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_snapshot(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.SnapshotTableRequest, + bigtable_table_admin.DeleteSnapshotRequest, dict, ], ) -def test_snapshot_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_delete_snapshot_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.snapshot_table(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_snapshot(request) # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - + assert response is None -def test_snapshot_table_rest_required_fields( - request_type=bigtable_table_admin.SnapshotTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request_init["name"] = "" - request_init["cluster"] = "" - request_init["snapshot_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb( + bigtable_table_admin.DeleteSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).snapshot_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # verify required fields with default values are now present + request = bigtable_table_admin.DeleteSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata - jsonified_request["name"] = "name_value" - jsonified_request["cluster"] = "cluster_value" - jsonified_request["snapshotId"] = "snapshot_id_value" + client.delete_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).snapshot_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "cluster" in jsonified_request - assert jsonified_request["cluster"] == "cluster_value" - assert "snapshotId" in jsonified_request - assert jsonified_request["snapshotId"] == "snapshot_id_value" - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_create_backup_rest_bad_request( + request_type=bigtable_table_admin.CreateBackupRequest, +): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_backup(request) - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateBackupRequest, + dict, + ], +) +def test_create_backup_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init["backup"] = { + "name": "name_value", + "source_table": "source_table_value", + "source_backup": "source_backup_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + "backup_type": 1, + "hot_to_standard_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
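# --- Illustrative sketch (editor's addition): the branch below distinguishes
# proto-plus wrappers, which expose their schema via `.meta.fields`, from raw
# `*_pb2` classes, which expose a `DESCRIPTOR`.  The same check standalone
# (the function and argument names are assumed):
def is_proto_plus(msg_cls) -> bool:
    return not hasattr(msg_cls, "DESCRIPTOR")
# ---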
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - response = client.snapshot_table(request) + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") -def test_snapshot_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_backup(request) - unset_fields = transport.snapshot_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "cluster", - "snapshotId", - ) - ) - ) + # Establish that the response is the type that we expect. 
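# --- Illustrative sketch (editor's addition): the pruning loops above delete
# sample-request subfields that the installed dependency version does not
# define, so request construction still succeeds.  The core operation on an
# assumed sample dict (repeated-field handling omitted for brevity):
sample = {"backup": {"name": "n", "unknown_subfield": 1}}
known = {("backup", "name")}  # (field, subfield) pairs present at runtime
for field, value in sample.items():
    if isinstance(value, dict):
        for subfield in list(value):
            if (field, subfield) not in known:
                del value[subfield]
assert sample == {"backup": {"name": "n"}}
# ---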
+ json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_snapshot_table_rest_interceptors(null_interceptor): +def test_create_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( @@ -10477,14 +25920,17 @@ def test_snapshot_table_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" + transports.BigtableTableAdminRestInterceptor, "post_create_backup" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" + transports.BigtableTableAdminRestInterceptor, "post_create_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_backup" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.SnapshotTableRequest.pb( - bigtable_table_admin.SnapshotTableRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CreateBackupRequest.pb( + bigtable_table_admin.CreateBackupRequest() ) transcode.return_value = { "method": "post", @@ -10493,22 +25939,22 @@ def test_snapshot_table_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.CreateBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.snapshot_table( + client.create_backup( request, metadata=[ ("key", "val"), @@ -10518,18 +25964,19 @@ def test_snapshot_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_snapshot_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.SnapshotTableRequest +def test_get_backup_rest_bad_request( + request_type=bigtable_table_admin.GetBackupRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a 
BadRequest error. @@ -10537,235 +25984,94 @@ def test_snapshot_table_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.snapshot_table(request) - - -def test_snapshot_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.snapshot_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" - % client.transport._host, - args[1], - ) - - -def test_snapshot_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", - ) - - -def test_snapshot_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_backup(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetSnapshotRequest, + bigtable_table_admin.GetBackupRequest, dict, ], ) -def test_get_snapshot_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_get_backup_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" } request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Snapshot( + return_value = table.Backup( name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_snapshot(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" - - -def test_get_snapshot_rest_required_fields( - request_type=bigtable_table_admin.GetSnapshotRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Snapshot() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.get_snapshot(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = transport.get_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_backup(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_snapshot_rest_interceptors(null_interceptor): +def test_get_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" + transports.BigtableTableAdminRestInterceptor, "post_get_backup" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" + transports.BigtableTableAdminRestInterceptor, "post_get_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_backup" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.GetSnapshotRequest.pb( - bigtable_table_admin.GetSnapshotRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.GetBackupRequest.pb( + bigtable_table_admin.GetBackupRequest() ) transcode.return_value = { "method": "post", @@ -10774,20 +26080,22 @@ def test_get_snapshot_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Snapshot.to_json(table.Snapshot()) + req.return_value.headers = {"header-1": 
"value-1", "header-2": "value-2"} + return_value = table.Backup.to_json(table.Backup()) + req.return_value.content = return_value - request = bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.GetBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Snapshot() + post.return_value = table.Backup() + post_with_metadata.return_value = table.Backup(), metadata - client.get_snapshot( + client.get_backup( request, metadata=[ ("key", "val"), @@ -10797,19 +26105,20 @@ def test_get_snapshot_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_snapshot_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.GetSnapshotRequest +def test_update_backup_rest_bad_request( + request_type=bigtable_table_admin.UpdateBackupRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } } request = request_type(**request_init) @@ -10818,241 +26127,189 @@ def test_get_snapshot_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_snapshot(request) - - -def test_get_snapshot_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Snapshot() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.get_snapshot(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - % client.transport._host, - args[1], - ) - - -def test_get_snapshot_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), - name="name_value", - ) - - -def test_get_snapshot_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_backup(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListSnapshotsRequest, + bigtable_table_admin.UpdateBackupRequest, dict, ], ) -def test_list_snapshots_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_update_backup_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } + request_init["backup"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", + "source_table": "source_table_value", + "source_backup": "source_backup_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + "backup_type": 1, + "hot_to_standard_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", + return_value = table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_snapshots(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_snapshots_rest_required_fields( - request_type=bigtable_table_admin.ListSnapshotsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_snapshots._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_snapshots._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 - - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.list_snapshots(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_snapshots_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_snapshots._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_backup(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_snapshots_rest_interceptors(null_interceptor): +def test_update_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" + transports.BigtableTableAdminRestInterceptor, "post_update_backup" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" + transports.BigtableTableAdminRestInterceptor, "post_update_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_backup" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( - bigtable_table_admin.ListSnapshotsRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.UpdateBackupRequest.pb( + bigtable_table_admin.UpdateBackupRequest() ) transcode.return_value = { "method": "post", @@ -11061,22 +26318,22 @@ def test_list_snapshots_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() 
- req.return_value._content = bigtable_table_admin.ListSnapshotsResponse.to_json( - bigtable_table_admin.ListSnapshotsResponse() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = table.Backup.to_json(table.Backup()) + req.return_value.content = return_value - request = bigtable_table_admin.ListSnapshotsRequest() + request = bigtable_table_admin.UpdateBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListSnapshotsResponse() + post.return_value = table.Backup() + post_with_metadata.return_value = table.Backup(), metadata - client.list_snapshots( + client.update_backup( request, metadata=[ ("key", "val"), @@ -11086,18 +26343,19 @@ def test_list_snapshots_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_list_snapshots_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.ListSnapshotsRequest +def test_delete_backup_rest_bad_request( + request_type=bigtable_table_admin.DeleteBackupRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -11105,276 +26363,72 @@ def test_list_snapshots_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_snapshots(request) - - -def test_list_snapshots_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.list_snapshots(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - % client.transport._host, - args[1], - ) - - -def test_list_snapshots_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", - ) - - -def test_list_snapshots_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - pager = client.list_snapshots(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) for i in results) - - pages = list(client.list_snapshots(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_backup(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteSnapshotRequest, + bigtable_table_admin.DeleteBackupRequest, dict, ], ) -def test_delete_snapshot_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_delete_backup_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" } request = request_type(**request_init) - # Mock the http request call within the method and fake a response. 
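# --- Illustrative sketch (editor's addition): the removed pager test queued
# one fake HTTP response per page through req.side_effect and let the pager
# follow next_page_token until it came back empty.  The traversal distilled,
# with plain dicts standing in for ListSnapshotsResponse pages (assumed):
pages = [
    {"snapshots": [1, 2, 3], "next_page_token": "abc"},
    {"snapshots": [], "next_page_token": "def"},
    {"snapshots": [4], "next_page_token": ""},
]
results = []
for page in pages:  # mirrors the transport consuming side_effect in order
    results.extend(page["snapshots"])
    if not page["next_page_token"]:
        break
assert results == [1, 2, 3, 4]
# ---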
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_snapshot(request) - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_snapshot_rest_required_fields( - request_type=bigtable_table_admin.DeleteSnapshotRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.delete_snapshot(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None -def test_delete_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_backup(request) - unset_fields = transport.delete_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the response is the type that we expect. + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_snapshot_rest_interceptors(null_interceptor): +def test_delete_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot" + transports.BigtableTableAdminRestInterceptor, "pre_delete_backup" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb( - bigtable_table_admin.DeleteSnapshotRequest() + pb_message = bigtable_table_admin.DeleteBackupRequest.pb( + bigtable_table_admin.DeleteBackupRequest() ) transcode.return_value = { "method": "post", @@ -11383,18 +26437,18 @@ def test_delete_snapshot_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = bigtable_table_admin.DeleteSnapshotRequest() + request = bigtable_table_admin.DeleteBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - client.delete_snapshot( + client.delete_backup( request, metadata=[ ("key", "val"), @@ -11405,18 +26459,14 @@ def test_delete_snapshot_rest_interceptors(null_interceptor): pre.assert_called_once() -def test_delete_snapshot_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DeleteSnapshotRequest +def test_list_backups_rest_bad_request( + request_type=bigtable_table_admin.ListBackupsRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
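# --- Editor's note (sketch): throughout these hunks the tests construct
# BaseBigtableTableAdminClient where they previously used
# BigtableTableAdminClient, always with anonymous credentials over REST.  A
# helper capturing that recurring pattern (the helper name is hypothetical):
from google.auth import credentials as ga_credentials

def make_rest_client(client_cls):
    return client_cls(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
    )
# ---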
@@ -11424,268 +26474,82 @@ def test_delete_snapshot_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_snapshot(request) - - -def test_delete_snapshot_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 + response_value = mock.Mock() json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() req.return_value = response_value - - client.delete_snapshot(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), - name="name_value", - ) - - -def test_delete_snapshot_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_backups(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CreateBackupRequest, + bigtable_table_admin.ListBackupsRequest, dict, ], ) -def test_create_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_list_backups_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request_init["backup"] = { - "name": "name_value", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_backup(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_backups(request) # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_create_backup_rest_required_fields( - request_type=bigtable_table_admin.CreateBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - assert "backupId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == request_init["backup_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["backupId"] = "backup_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_backup._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("backup_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == "backup_id_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.create_backup(request) - - expected_params = [ - ( - "backupId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_create_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.create_backup._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("backupId",)) - & set( - ( - "parent", - "backupId", - "backup", - ) - ) - ) + assert isinstance(response, pagers.ListBackupsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_backup_rest_interceptors(null_interceptor): +def test_list_backups_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_backup" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_backups" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_backup" + transports.BigtableTableAdminRestInterceptor, "post_list_backups_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_backups" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.CreateBackupRequest.pb( - bigtable_table_admin.CreateBackupRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ListBackupsRequest.pb( + bigtable_table_admin.ListBackupsRequest() ) transcode.return_value = { "method": "post", @@ -11694,22 +26558,27 @@ def test_create_backup_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.ListBackupsResponse.to_json( + bigtable_table_admin.ListBackupsResponse() ) + req.return_value.content = return_value - request = bigtable_table_admin.CreateBackupRequest() + request = bigtable_table_admin.ListBackupsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = 
operations_pb2.Operation() + post.return_value = bigtable_table_admin.ListBackupsResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListBackupsResponse(), + metadata, + ) - client.create_backup( + client.list_backups( request, metadata=[ ("key", "val"), @@ -11719,42 +26588,17 @@ def test_create_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_create_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CreateBackupRequest +def test__restore_table_rest_bad_request( + request_type=bigtable_table_admin.RestoreTableRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request_init["backup"] = { - "name": "name_value", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - } + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -11762,237 +26606,201 @@ def test_create_backup_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_backup(request) - - -def test_create_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.create_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
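# The flattened-call assertions that follow rely on
# google.api_core.path_template.validate(), which checks a concrete URL against
# a URI template. A minimal, self-contained illustration of the same call
# (template and path below are made-up values):
from google.api_core import path_template

assert path_template.validate(
    "v2/{parent=projects/*/instances/*/clusters/*}/backups",
    "v2/projects/p1/instances/i1/clusters/c1/backups",
)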
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" - % client.transport._host, - args[1], - ) - - -def test_create_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - - -def test_create_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client._restore_table(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetBackupRequest, + bigtable_table_admin.RestoreTableRequest, dict, ], ) -def test_get_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test__restore_table_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_backup(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client._restore_table(request) # Establish that the response is the type that we expect. 
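# For long-running methods such as _restore_table, the mocked payload is a
# google.longrunning Operation rather than the final response message, which is
# why the generated test appears to only re-serialize the return value instead
# of asserting on response fields. A standalone sketch of the JSON round-trip
# the mock relies on:
from google.longrunning import operations_pb2
from google.protobuf import json_format

op = operations_pb2.Operation(name="operations/spam")
parsed = json_format.Parse(json_format.MessageToJson(op), operations_pb2.Operation())
assert parsed.name == "operations/spam"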
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + json_return_value = json_format.MessageToJson(return_value) -def test_get_backup_rest_required_fields( - request_type=bigtable_table_admin.GetBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test__restore_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # verify fields with default values are dropped + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_restore_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_restore_table_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_restore_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.RestoreTableRequest.pb( + bigtable_table_admin.RestoreTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - # verify required fields with default values are now present + request = bigtable_table_admin.RestoreTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - jsonified_request["name"] = "name_value" + client._restore_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_copy_backup_rest_bad_request( + request_type=bigtable_table_admin.CopyBackupRequest, +): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = table.Backup() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.copy_backup(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_backup(request) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") -def test_get_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.copy_backup(request) - unset_fields = transport.get_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_backup_rest_interceptors(null_interceptor): +def test_copy_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_backup" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_copy_backup" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_backup" + transports.BigtableTableAdminRestInterceptor, "post_copy_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.GetBackupRequest.pb( - bigtable_table_admin.GetBackupRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CopyBackupRequest.pb( + bigtable_table_admin.CopyBackupRequest() ) transcode.return_value = { "method": "post", @@ -12001,20 +26809,22 @@ def test_get_backup_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Backup.to_json(table.Backup()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CopyBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Backup() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.get_backup( + client.copy_backup( request, metadata=[ ("key", "val"), @@ -12024,20 +26834,17 @@ def test_get_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_backup_rest_bad_request( - transport: str = 
"rest", request_type=bigtable_table_admin.GetBackupRequest +def test_get_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.GetIamPolicyRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -12045,267 +26852,81 @@ def test_get_backup_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_backup(request) - - -def test_get_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.get_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" - % client.transport._host, - args[1], - ) - - -def test_get_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), - name="name_value", - ) - - -def test_get_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_iam_policy(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UpdateBackupRequest, + iam_policy_pb2.GetIamPolicyRequest, dict, ], ) -def test_update_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - request_init["backup"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.update_backup(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - - -def test_update_backup_rest_required_fields( - request_type=bigtable_table_admin.UpdateBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_backup._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_get_iam_policy_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Backup() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.update_backup(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - -def test_update_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) - unset_fields = transport.update_backup._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "backup", - "updateMask", - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", ) - ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
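# The interceptor tests that follow now also patch a post_*_with_metadata hook,
# which receives (response, metadata) and must return the same pair, alongside
# the existing pre_* and post_* hooks. A rough sketch of the hook shapes being
# exercised; SampleInterceptor is a hypothetical stand-in, not the generated
# BigtableTableAdminRestInterceptor class:
class SampleInterceptor:
    def pre_get_iam_policy(self, request, metadata):
        # runs before the request is sent; may rewrite request and metadata
        return request, metadata

    def post_get_iam_policy(self, response):
        # runs after a successful response; may rewrite the response
        return response

    def post_get_iam_policy_with_metadata(self, response, metadata):
        # newer variant that also sees (and may rewrite) the response metadata
        return response, metadata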
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_backup_rest_interceptors(null_interceptor): +def test_get_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_backup" + transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_backup" + transports.BigtableTableAdminRestInterceptor, + "post_get_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.UpdateBackupRequest.pb( - bigtable_table_admin.UpdateBackupRequest() - ) + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.GetIamPolicyRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -12313,20 +26934,22 @@ def test_update_backup_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Backup.to_json(table.Backup()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value - request = bigtable_table_admin.UpdateBackupRequest() + request = iam_policy_pb2.GetIamPolicyRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Backup() + post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata - client.update_backup( + client.get_iam_policy( request, metadata=[ ("key", "val"), @@ -12336,46 +26959,17 @@ def test_update_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_update_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.UpdateBackupRequest +def test_set_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.SetIamPolicyRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - request_init["backup"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": 
{}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - } + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -12383,222 +26977,81 @@ def test_update_backup_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_backup(request) - - -def test_update_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup() - - # get arguments that satisfy an http rule for this method - sample_request = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - - # get truthy value for each flattened field - mock_args = dict( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.update_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" - % client.transport._host, - args[1], - ) - - -def test_update_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_update_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.set_iam_policy(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteBackupRequest, + iam_policy_pb2.SetIamPolicyRequest, dict, ], ) -def test_delete_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_set_iam_policy_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_backup(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.set_iam_policy(request) # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_backup_rest_required_fields( - request_type=bigtable_table_admin.DeleteBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = "name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.delete_backup(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.delete_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_backup_rest_interceptors(null_interceptor): +def test_set_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_backup" + transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_set_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteBackupRequest.pb( - bigtable_table_admin.DeleteBackupRequest() - ) + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.SetIamPolicyRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -12606,18 +27059,22 @@ def test_delete_backup_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value - request = bigtable_table_admin.DeleteBackupRequest() + request = iam_policy_pb2.SetIamPolicyRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = policy_pb2.Policy() + 
post_with_metadata.return_value = policy_pb2.Policy(), metadata - client.delete_backup( + client.set_iam_policy( request, metadata=[ ("key", "val"), @@ -12626,20 +27083,18 @@ def test_delete_backup_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_delete_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DeleteBackupRequest +def test_test_iam_permissions_rest_bad_request( + request_type=iam_policy_pb2.TestIamPermissionsRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -12647,242 +27102,281 @@ def test_delete_backup_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_backup(request) - - -def test_delete_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 + response_value = mock.Mock() json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() req.return_value = response_value - - client.delete_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name="name_value", - ) - - -def test_delete_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.test_iam_permissions(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListBackupsRequest, + iam_policy_pb2.TestIamPermissionsRequest, dict, ], ) -def test_list_backups_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_test_iam_permissions_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_backups(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.test_iam_permissions(request) # Establish that the response is the type that we expect. 
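# Unlike the bigtable_table_admin messages, the IAM types here are plain
# protobuf (there is no proto-plus `.pb()` conversion step before
# MessageToJson), so the mocked payload is serialized directly. A standalone
# round-trip mirroring what the mock feeds back; the permission string is a
# made-up example:
from google.iam.v1 import iam_policy_pb2
from google.protobuf import json_format

resp = iam_policy_pb2.TestIamPermissionsResponse(permissions=["bigtable.tables.get"])
parsed = json_format.Parse(
    json_format.MessageToJson(resp), iam_policy_pb2.TestIamPermissionsResponse()
)
assert list(parsed.permissions) == ["bigtable.tables.get"]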
- assert isinstance(response, pagers.ListBackupsPager) - assert response.next_page_token == "next_page_token_value" - + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] -def test_list_backups_rest_required_fields( - request_type=bigtable_table_admin.ListBackupsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_test_iam_permissions_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_backups._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_test_iam_permissions_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.TestIamPermissionsRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - # verify required fields with default values are now present + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson( + iam_policy_pb2.TestIamPermissionsResponse() + ) + req.return_value.content = return_value - jsonified_request["parent"] = "parent_value" + request = iam_policy_pb2.TestIamPermissionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + post_with_metadata.return_value = ( + iam_policy_pb2.TestIamPermissionsResponse(), + metadata, + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_backups._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "filter", - "order_by", - "page_size", - "page_token", + client.test_iam_permissions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) - ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + +def test_create_schema_bundle_rest_bad_request( + request_type=bigtable_table_admin.CreateSchemaBundleRequest, +): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_schema_bundle(request) - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateSchemaBundleRequest, + dict, + ], +) +def test_create_schema_bundle_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init["schema_bundle"] = { + "name": "name_value", + "proto_schema": {"proto_descriptors": b"proto_descriptors_blob"}, + "etag": "etag_value", + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateSchemaBundleRequest.meta.fields[ + "schema_bundle" + ] - response = client.list_backups(request) + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["schema_bundle"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["schema_bundle"][field])): + del request_init["schema_bundle"][field][i][subfield] + else: + del request_init["schema_bundle"][field][subfield] + request = request_type(**request_init) -def test_list_backups_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
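# Worked illustration of the subfield-pruning loop above, with hypothetical
# field names: if the runtime version of the proto only defines
# proto_schema.proto_descriptors, a subfield present only in newer versions is
# dropped from the sample dict before the request object is built.
runtime_nested_fields = [("proto_schema", "proto_descriptors")]
sample = {"proto_schema": {"proto_descriptors": b"", "added_later": 1}}
for field, value in list(sample.items()):
    if isinstance(value, dict):
        for subfield in list(value.keys()):
            if (field, subfield) not in runtime_nested_fields:
                del sample[field][subfield]
assert sample == {"proto_schema": {"proto_descriptors": b""}}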
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_schema_bundle(request) - unset_fields = transport.list_backups._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "filter", - "orderBy", - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_backups_rest_interceptors(null_interceptor): +def test_create_schema_bundle_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_backups" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_schema_bundle" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_backups" + transports.BigtableTableAdminRestInterceptor, + "post_create_schema_bundle_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_schema_bundle" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.ListBackupsRequest.pb( - bigtable_table_admin.ListBackupsRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CreateSchemaBundleRequest.pb( + bigtable_table_admin.CreateSchemaBundleRequest() ) transcode.return_value = { "method": "post", @@ -12891,22 +27385,22 @@ def test_list_backups_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListBackupsResponse.to_json( - bigtable_table_admin.ListBackupsResponse() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_table_admin.ListBackupsRequest() + request = bigtable_table_admin.CreateSchemaBundleRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListBackupsResponse() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.list_backups( + client.create_schema_bundle( request, metadata=[ ("key", "val"), @@ -12916,18 +27410,21 @@ def test_list_backups_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + 
post_with_metadata.assert_called_once() -def test_list_backups_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.ListBackupsRequest +def test_update_schema_bundle_rest_bad_request( + request_type=bigtable_table_admin.UpdateSchemaBundleRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "schema_bundle": { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -12935,277 +27432,138 @@ def test_list_backups_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_backups(request) - - -def test_list_backups_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.list_backups(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" - % client.transport._host, - args[1], - ) - - -def test_list_backups_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent="parent_value", - ) - - -def test_list_backups_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token="def", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - pager = client.list_backups(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Backup) for i in results) - - pages = list(client.list_backups(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_schema_bundle(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.RestoreTableRequest, + bigtable_table_admin.UpdateSchemaBundleRequest, dict, ], ) -def test_restore_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_update_schema_bundle_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.restore_table(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_restore_table_rest_required_fields( - request_type=bigtable_table_admin.RestoreTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).restore_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present + request_init = { + "schema_bundle": { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + } + request_init["schema_bundle"] = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4", + "proto_schema": {"proto_descriptors": b"proto_descriptors_blob"}, + "etag": "etag_value", + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateSchemaBundleRequest.meta.fields[ + "schema_bundle" + ] - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).restore_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
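+        # For example, a message-typed field like `schema_bundle` yields its nested
+        # fields (name, proto_schema, etag), while a scalar field yields [].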
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["schema_bundle"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["schema_bundle"][field])): + del request_init["schema_bundle"][field][i][subfield] + else: + del request_init["schema_bundle"][field][subfield] request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.restore_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") -def test_restore_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_schema_bundle(request) - unset_fields = transport.restore_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - ) - ) - ) + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_restore_table_rest_interceptors(null_interceptor): +def test_update_schema_bundle_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( @@ -13213,14 +27571,18 @@ def test_restore_table_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_restore_table" + transports.BigtableTableAdminRestInterceptor, "post_update_schema_bundle" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_restore_table" + transports.BigtableTableAdminRestInterceptor, + "post_update_schema_bundle_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_schema_bundle" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.RestoreTableRequest.pb( - bigtable_table_admin.RestoreTableRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.UpdateSchemaBundleRequest.pb( + bigtable_table_admin.UpdateSchemaBundleRequest() ) transcode.return_value = { "method": "post", @@ -13229,22 +27591,22 @@ def test_restore_table_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - 
req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_table_admin.RestoreTableRequest() + request = bigtable_table_admin.UpdateSchemaBundleRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.restore_table( + client.update_schema_bundle( request, metadata=[ ("key", "val"), @@ -13254,18 +27616,19 @@ def test_restore_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_restore_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.RestoreTableRequest +def test_get_schema_bundle_rest_bad_request( + request_type=bigtable_table_admin.GetSchemaBundleRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -13273,181 +27636,87 @@ def test_restore_table_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.restore_table(request) - - -def test_restore_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_schema_bundle(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CopyBackupRequest, + bigtable_table_admin.GetSchemaBundleRequest, dict, ], ) -def test_copy_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_get_schema_bundle_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + return_value = table.SchemaBundle( + name="name_value", + etag="etag_value", + ) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + # Convert return value to protobuf type + return_value = table.SchemaBundle.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.copy_backup(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_schema_bundle(request) # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_copy_backup_rest_required_fields( - request_type=bigtable_table_admin.CopyBackupRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" - request_init["source_backup"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).copy_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - jsonified_request["backupId"] = "backup_id_value" - jsonified_request["sourceBackup"] = "source_backup_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).copy_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == "backup_id_value" - assert "sourceBackup" in jsonified_request - assert jsonified_request["sourceBackup"] == "source_backup_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.copy_backup(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_copy_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.copy_backup._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "backupId", - "sourceBackup", - "expireTime", - ) - ) - ) + assert isinstance(response, table.SchemaBundle) + assert response.name == "name_value" + assert response.etag == "etag_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_copy_backup_rest_interceptors(null_interceptor): +def test_get_schema_bundle_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_copy_backup" + transports.BigtableTableAdminRestInterceptor, "post_get_schema_bundle" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" + transports.BigtableTableAdminRestInterceptor, + "post_get_schema_bundle_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_schema_bundle" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.CopyBackupRequest.pb( - bigtable_table_admin.CopyBackupRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.GetSchemaBundleRequest.pb( + bigtable_table_admin.GetSchemaBundleRequest() ) transcode.return_value = { "method": "post", @@ -13456,22 +27725,22 @@ def test_copy_backup_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = table.SchemaBundle.to_json(table.SchemaBundle()) + req.return_value.content = return_value - request = bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.GetSchemaBundleRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = table.SchemaBundle() + post_with_metadata.return_value = 
table.SchemaBundle(), metadata - client.copy_backup( + client.get_schema_bundle( request, metadata=[ ("key", "val"), @@ -13481,18 +27750,17 @@ def test_copy_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_copy_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CopyBackupRequest +def test_list_schema_bundles_rest_bad_request( + request_type=bigtable_table_admin.ListSchemaBundlesRequest, ): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -13500,231 +27768,208 @@ def test_copy_backup_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.copy_backup(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_schema_bundles(request) -def test_copy_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListSchemaBundlesRequest, + dict, + ], +) +def test_list_schema_bundles_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + return_value = bigtable_table_admin.ListSchemaBundlesResponse( + next_page_token="next_page_token_value", ) - mock_args.update(sample_request) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_schema_bundles(request) - client.copy_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSchemaBundlesPager) + assert response.next_page_token == "next_page_token_value" -def test_copy_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_schema_bundles_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BaseBigtableTableAdminClient(transport=transport) - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_schema_bundles" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_list_schema_bundles_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_schema_bundles" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ListSchemaBundlesRequest.pb( + bigtable_table_admin.ListSchemaBundlesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.ListSchemaBundlesResponse.to_json( + bigtable_table_admin.ListSchemaBundlesResponse() + ) + req.return_value.content = return_value + + request = bigtable_table_admin.ListSchemaBundlesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListSchemaBundlesResponse(), + metadata, + ) + + client.list_schema_bundles( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + -def test_copy_backup_rest_error(): - client = BigtableTableAdminClient( +def test_delete_schema_bundle_rest_bad_request( + request_type=bigtable_table_admin.DeleteSchemaBundleRequest, +): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
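+    # A mocked 400 status is all the REST transport needs to raise
+    # core_exceptions.BadRequest; the response body can remain empty.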
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_schema_bundle(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.GetIamPolicyRequest, + bigtable_table_admin.DeleteSchemaBundleRequest, dict, ], ) -def test_get_iam_policy_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", +def test_delete_schema_bundle_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) + return_value = None # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_iam_policy(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_schema_bundle(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -def test_get_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.GetIamPolicyRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.get_iam_policy(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_get_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource",))) + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): +def test_delete_schema_bundle_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy" + transports.BigtableTableAdminRestInterceptor, "pre_delete_schema_bundle" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() + pb_message = bigtable_table_admin.DeleteSchemaBundleRequest.pb( + bigtable_table_admin.DeleteSchemaBundleRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -13732,20 +27977,18 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.DeleteSchemaBundleRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - client.get_iam_policy( + client.delete_schema_bundle( request, metadata=[ ("key", "val"), @@ -13754,766 +27997,765 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() -def test_get_iam_policy_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest -): - client = BigtableTableAdminClient( +def test_initialize_client_w_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_create_table_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + client.create_table(request=None) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_iam_policy(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableRequest() + assert args[0] == request_msg -def test_get_iam_policy_rest_flattened(): - client = BigtableTableAdminClient( + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_from_snapshot_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + client.create_table_from_snapshot(request=None) - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) + assert args[0] == request_msg - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.get_iam_policy(**mock_args) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_tables_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" - % client.transport._host, - args[1], - ) + # Mock the actual call, and fake the request. 
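+    # Calling with request=None exercises default request construction; the
+    # assertion below only checks that an empty ListTablesRequest reaches the stub.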
+ with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + client.list_tables(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListTablesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_table_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + client.get_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_table_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + client.update_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_table_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + client.delete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_undelete_table_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + client.undelete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UndeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_authorized_view_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + client.create_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + assert args[0] == request_msg -def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_authorized_views_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + client.list_authorized_views(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() -def test_get_iam_policy_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + assert args[0] == request_msg -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.SetIamPolicyRequest, - dict, - ], -) -def test_set_iam_policy_rest(request_type): - client = BigtableTableAdminClient( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_authorized_view_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + client.get_authorized_view(request=None) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetAuthorizedViewRequest() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.set_iam_policy(request) - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_update_authorized_view_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + client.update_authorized_view(request=None) -def test_set_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.SetIamPolicyRequest, -): - transport_class = transports.BigtableTableAdminRestTransport + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) + assert args[0] == request_msg - # verify fields with default values are dropped - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_authorized_view_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # verify required fields with default values are now present + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + client.delete_authorized_view(request=None) - jsonified_request["resource"] = "resource_value" + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + assert args[0] == request_msg - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - client = BigtableTableAdminClient( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_modify_column_families_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + client.modify_column_families(request=None) - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + assert args[0] == request_msg - response = client.set_iam_policy(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_drop_row_range_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + client.drop_row_range(request=None) -def test_set_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DropRowRangeRequest() - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "policy", - ) - ) - ) + assert args[0] == request_msg -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_consistency_token_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) + + # Mock the actual call, and fake the request. 
with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + type(client.transport.generate_consistency_token), "__call__" + ) as call: + client.generate_consistency_token(request=None) - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() - request = iam_policy_pb2.SetIamPolicyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() + assert args[0] == request_msg - client.set_iam_policy( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - pre.assert_called_once() - post.assert_called_once() +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_check_consistency_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + client.check_consistency(request=None) -def test_set_iam_policy_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest -): - client = BigtableTableAdminClient( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CheckConsistencyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_snapshot_table_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + client.snapshot_table(request=None) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.set_iam_policy(request) + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.SnapshotTableRequest() + assert args[0] == request_msg -def test_set_iam_policy_rest_flattened(): - client = BigtableTableAdminClient( + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_snapshot_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + client.get_snapshot(request=None) - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSnapshotRequest() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + assert args[0] == request_msg - client.set_iam_policy(**mock_args) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" - % client.transport._host, - args[1], - ) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_snapshots_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + client.list_snapshots(request=None) -def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSnapshotsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_snapshot_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + client.delete_snapshot(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSnapshotRequest() -def test_set_iam_policy_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + assert args[0] == request_msg -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, - ], -) -def test_test_iam_permissions_rest(request_type): - client = BigtableTableAdminClient( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_backup_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + client.create_backup(request=None) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateBackupRequest() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.test_iam_permissions(request) - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_backup_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + client.get_backup(request=None) -def test_test_iam_permissions_rest_required_fields( - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetBackupRequest() - request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, - ) - ) + assert args[0] == request_msg - # verify fields with default values are dropped - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_backup_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # verify required fields with default values are now present + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + client.update_backup(request=None) - jsonified_request["resource"] = "resource_value" - jsonified_request["permissions"] = "permissions_value" + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateBackupRequest() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + assert args[0] == request_msg - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == "permissions_value" - client = BigtableTableAdminClient( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_backup_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + client.delete_backup(request=None) - response_value = Response() - response_value.status_code = 200 + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteBackupRequest() - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.test_iam_permissions(request) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_backups_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + client.list_backups(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListBackupsRequest() + assert args[0] == request_msg -def test_test_iam_permissions_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "permissions", - ) - ) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test__restore_table_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + client._restore_table(request=None) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.RestoreTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_copy_backup_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - iam_policy_pb2.TestIamPermissionsResponse() - ) - request = iam_policy_pb2.TestIamPermissionsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + client.copy_backup(request=None) - client.test_iam_permissions( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CopyBackupRequest() - pre.assert_called_once() - post.assert_called_once() + assert args[0] == request_msg -def test_test_iam_permissions_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest -): - client = BigtableTableAdminClient( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy(request=None) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.test_iam_permissions(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request_msg -def test_test_iam_permissions_rest_flattened(): - client = BigtableTableAdminClient( + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_set_iam_policy_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy(request=None) - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - permissions=["permissions_value"], - ) - mock_args.update(sample_request) + assert args[0] == request_msg - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.test_iam_permissions(**mock_args) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_rest(): + client = BaseBigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" - % client.transport._host, - args[1], - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() -def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_schema_bundle_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + client.create_schema_bundle(request=None) + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateSchemaBundleRequest() -def test_test_iam_permissions_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + assert args[0] == request_msg -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_schema_bundle_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + client.update_schema_bundle(request=None) - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, - transport=transport, - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateSchemaBundleRequest() - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) + assert args[0] == request_msg - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_schema_bundle_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + client.get_schema_bundle(request=None) -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = BigtableTableAdminClient(transport=transport) - assert client.transport is transport + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSchemaBundleRequest() + assert args[0] == request_msg -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_schema_bundles_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel - transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + client.list_schema_bundles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSchemaBundlesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_schema_bundle_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + client.delete_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSchemaBundleRequest() -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - transports.BigtableTableAdminRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + assert args[0] == request_msg -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - "rest", - ], -) -def test_transport_kind(transport_name): - transport = BigtableTableAdminClient.get_transport_class(transport_name)( +def test_bigtable_table_admin_rest_lro_client(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have an api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, ) - assert transport.kind == transport_name + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( @@ -14551,6 +28793,11 @@ def test_bigtable_table_admin_base_transport(): "update_table", "delete_table", "undelete_table", + "create_authorized_view", + "list_authorized_views", + "get_authorized_view", + "update_authorized_view", + "delete_authorized_view", "modify_column_families", "drop_row_range", "generate_consistency_token", @@ -14569,6 +28816,11 @@ def test_bigtable_table_admin_base_transport(): "get_iam_policy", "set_iam_policy", "test_iam_permissions", + "create_schema_bundle", + "update_schema_bundle", + "get_schema_bundle", + "list_schema_bundles", + "delete_schema_bundle", ) for method in methods: with pytest.raises(NotImplementedError): @@ -14634,7 +28886,7 @@ def test_bigtable_table_admin_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) - BigtableTableAdminClient() + BaseBigtableTableAdminClient() adc.assert_called_once_with( scopes=None, default_scopes=( @@ -14799,23 +29051,6 @@ def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls(): mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) -def test_bigtable_table_admin_rest_lro_client(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - @pytest.mark.parametrize( "transport_name", [ @@ -14825,7 +29060,7 @@ def test_bigtable_table_admin_rest_lro_client(): ], ) def test_bigtable_table_admin_host_no_port(transport_name): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com" @@ -14848,7 +29083,7 @@ def test_bigtable_table_admin_host_no_port(transport_name): ], ) def test_bigtable_table_admin_host_with_port(transport_name): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com:8000" @@ -14871,11 +29106,11 @@ def test_bigtable_table_admin_host_with_port(transport_name): def test_bigtable_table_admin_client_transport_session_collision(transport_name): creds1 = ga_credentials.AnonymousCredentials() creds2 = ga_credentials.AnonymousCredentials() - client1 = BigtableTableAdminClient( + client1 = BaseBigtableTableAdminClient( credentials=creds1, transport=transport_name, ) - client2 = BigtableTableAdminClient( + client2 = BaseBigtableTableAdminClient( credentials=creds2, transport=transport_name, ) @@ -14900,6 +29135,21 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name) session1 = client1.transport.undelete_table._session session2 = client2.transport.undelete_table._session assert session1 != session2 + session1 = client1.transport.create_authorized_view._session + session2 = client2.transport.create_authorized_view._session + assert session1 != session2 + session1 = client1.transport.list_authorized_views._session + session2 = client2.transport.list_authorized_views._session + assert session1 != session2 + session1 = client1.transport.get_authorized_view._session + session2 = client2.transport.get_authorized_view._session + assert session1 != session2 + session1 = client1.transport.update_authorized_view._session + session2 = client2.transport.update_authorized_view._session + assert session1 != session2 + session1 = client1.transport.delete_authorized_view._session + session2 = client2.transport.delete_authorized_view._session + assert session1 != session2 session1 = client1.transport.modify_column_families._session session2 = client2.transport.modify_column_families._session assert session1 != session2 @@ -14954,6 +29204,21 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name) session1 = client1.transport.test_iam_permissions._session session2 = client2.transport.test_iam_permissions._session assert session1 != session2 + session1 = client1.transport.create_schema_bundle._session + session2 = client2.transport.create_schema_bundle._session + assert session1 != session2 + session1 = client1.transport.update_schema_bundle._session + session2 = client2.transport.update_schema_bundle._session + assert session1 != session2 + session1 = client1.transport.get_schema_bundle._session + session2 = client2.transport.get_schema_bundle._session + assert session1 != session2 + session1 = client1.transport.list_schema_bundles._session + session2 = client2.transport.list_schema_bundles._session + assert session1 != session2 + session1 = client1.transport.delete_schema_bundle._session + session2 = client2.transport.delete_schema_bundle._session + assert session1 != session2 def 
test_bigtable_table_admin_grpc_transport_channel(): @@ -14984,6 +29249,7 @@ def test_bigtable_table_admin_grpc_asyncio_transport_channel(): # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.filterwarnings("ignore::FutureWarning") @pytest.mark.parametrize( "transport_class", [ @@ -15083,7 +29349,7 @@ def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class): def test_bigtable_table_admin_grpc_lro_client(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -15100,7 +29366,7 @@ def test_bigtable_table_admin_grpc_lro_client(): def test_bigtable_table_admin_grpc_lro_async_client(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) @@ -15116,67 +29382,100 @@ def test_bigtable_table_admin_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_backup_path(): +def test_authorized_view_path(): project = "squid" instance = "clam" - cluster = "whelk" - backup = "octopus" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( + table = "whelk" + authorized_view = "octopus" + expected = "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( project=project, instance=instance, - cluster=cluster, - backup=backup, + table=table, + authorized_view=authorized_view, + ) + actual = BaseBigtableTableAdminClient.authorized_view_path( + project, instance, table, authorized_view ) - actual = BigtableTableAdminClient.backup_path(project, instance, cluster, backup) assert expected == actual -def test_parse_backup_path(): +def test_parse_authorized_view_path(): expected = { "project": "oyster", "instance": "nudibranch", - "cluster": "cuttlefish", - "backup": "mussel", + "table": "cuttlefish", + "authorized_view": "mussel", } - path = BigtableTableAdminClient.backup_path(**expected) + path = BaseBigtableTableAdminClient.authorized_view_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_backup_path(path) + actual = BaseBigtableTableAdminClient.parse_authorized_view_path(path) assert expected == actual -def test_cluster_path(): +def test_backup_path(): project = "winkle" instance = "nautilus" cluster = "scallop" + backup = "abalone" + expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( + project=project, + instance=instance, + cluster=cluster, + backup=backup, + ) + actual = BaseBigtableTableAdminClient.backup_path( + project, instance, cluster, backup + ) + assert expected == actual + + +def test_parse_backup_path(): + expected = { + "project": "squid", + "instance": "clam", + "cluster": "whelk", + "backup": "octopus", + } + path = BaseBigtableTableAdminClient.backup_path(**expected) + + # Check that the path construction is reversible. 
+    actual = BaseBigtableTableAdminClient.parse_backup_path(path)
+    assert expected == actual
+
+
+def test_cluster_path():
+    project = "oyster"
+    instance = "nudibranch"
+    cluster = "cuttlefish"
     expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format(
         project=project,
         instance=instance,
         cluster=cluster,
     )
-    actual = BigtableTableAdminClient.cluster_path(project, instance, cluster)
+    actual = BaseBigtableTableAdminClient.cluster_path(project, instance, cluster)
     assert expected == actual


 def test_parse_cluster_path():
     expected = {
-        "project": "abalone",
-        "instance": "squid",
-        "cluster": "clam",
+        "project": "mussel",
+        "instance": "winkle",
+        "cluster": "nautilus",
     }
-    path = BigtableTableAdminClient.cluster_path(**expected)
+    path = BaseBigtableTableAdminClient.cluster_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_cluster_path(path)
+    actual = BaseBigtableTableAdminClient.parse_cluster_path(path)
     assert expected == actual


 def test_crypto_key_version_path():
-    project = "whelk"
-    location = "octopus"
-    key_ring = "oyster"
-    crypto_key = "nudibranch"
-    crypto_key_version = "cuttlefish"
+    project = "scallop"
+    location = "abalone"
+    key_ring = "squid"
+    crypto_key = "clam"
+    crypto_key_version = "whelk"
     expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(
         project=project,
         location=location,
@@ -15184,7 +29483,7 @@
         crypto_key=crypto_key,
         crypto_key_version=crypto_key_version,
     )
-    actual = BigtableTableAdminClient.crypto_key_version_path(
+    actual = BaseBigtableTableAdminClient.crypto_key_version_path(
         project, location, key_ring, crypto_key, crypto_key_version
     )
     assert expected == actual
@@ -15192,54 +29491,85 @@ def test_parse_crypto_key_version_path():
     expected = {
-        "project": "mussel",
-        "location": "winkle",
-        "key_ring": "nautilus",
-        "crypto_key": "scallop",
-        "crypto_key_version": "abalone",
+        "project": "octopus",
+        "location": "oyster",
+        "key_ring": "nudibranch",
+        "crypto_key": "cuttlefish",
+        "crypto_key_version": "mussel",
     }
-    path = BigtableTableAdminClient.crypto_key_version_path(**expected)
+    path = BaseBigtableTableAdminClient.crypto_key_version_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_crypto_key_version_path(path)
+    actual = BaseBigtableTableAdminClient.parse_crypto_key_version_path(path)
     assert expected == actual


 def test_instance_path():
-    project = "squid"
-    instance = "clam"
+    project = "winkle"
+    instance = "nautilus"
     expected = "projects/{project}/instances/{instance}".format(
         project=project,
         instance=instance,
     )
-    actual = BigtableTableAdminClient.instance_path(project, instance)
+    actual = BaseBigtableTableAdminClient.instance_path(project, instance)
     assert expected == actual


 def test_parse_instance_path():
     expected = {
-        "project": "whelk",
-        "instance": "octopus",
+        "project": "scallop",
+        "instance": "abalone",
+    }
+    path = BaseBigtableTableAdminClient.instance_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = BaseBigtableTableAdminClient.parse_instance_path(path)
+    assert expected == actual
+
+
+def test_schema_bundle_path():
+    project = "squid"
+    instance = "clam"
+    table = "whelk"
+    schema_bundle = "octopus"
+    expected = "projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}".format(
+        project=project,
+        instance=instance,
+        table=table,
+        schema_bundle=schema_bundle,
+    )
+    actual = BaseBigtableTableAdminClient.schema_bundle_path(
+        project, instance, table, schema_bundle
+    )
+    assert expected == actual
+
+
+def test_parse_schema_bundle_path():
+    expected = {
+        "project": "oyster",
+        "instance": "nudibranch",
+        "table": "cuttlefish",
+        "schema_bundle": "mussel",
     }
-    path = BigtableTableAdminClient.instance_path(**expected)
+    path = BaseBigtableTableAdminClient.schema_bundle_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_instance_path(path)
+    actual = BaseBigtableTableAdminClient.parse_schema_bundle_path(path)
     assert expected == actual


 def test_snapshot_path():
-    project = "oyster"
-    instance = "nudibranch"
-    cluster = "cuttlefish"
-    snapshot = "mussel"
+    project = "winkle"
+    instance = "nautilus"
+    cluster = "scallop"
+    snapshot = "abalone"
     expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format(
         project=project,
         instance=instance,
         cluster=cluster,
         snapshot=snapshot,
     )
-    actual = BigtableTableAdminClient.snapshot_path(
+    actual = BaseBigtableTableAdminClient.snapshot_path(
         project, instance, cluster, snapshot
     )
     assert expected == actual
@@ -15247,144 +29577,144 @@ def test_parse_snapshot_path():
     expected = {
-        "project": "winkle",
-        "instance": "nautilus",
-        "cluster": "scallop",
-        "snapshot": "abalone",
+        "project": "squid",
+        "instance": "clam",
+        "cluster": "whelk",
+        "snapshot": "octopus",
     }
-    path = BigtableTableAdminClient.snapshot_path(**expected)
+    path = BaseBigtableTableAdminClient.snapshot_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_snapshot_path(path)
+    actual = BaseBigtableTableAdminClient.parse_snapshot_path(path)
     assert expected == actual


 def test_table_path():
-    project = "squid"
-    instance = "clam"
-    table = "whelk"
+    project = "oyster"
+    instance = "nudibranch"
+    table = "cuttlefish"
     expected = "projects/{project}/instances/{instance}/tables/{table}".format(
         project=project,
         instance=instance,
         table=table,
     )
-    actual = BigtableTableAdminClient.table_path(project, instance, table)
+    actual = BaseBigtableTableAdminClient.table_path(project, instance, table)
     assert expected == actual


 def test_parse_table_path():
     expected = {
-        "project": "octopus",
-        "instance": "oyster",
-        "table": "nudibranch",
+        "project": "mussel",
+        "instance": "winkle",
+        "table": "nautilus",
     }
-    path = BigtableTableAdminClient.table_path(**expected)
+    path = BaseBigtableTableAdminClient.table_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_table_path(path)
+    actual = BaseBigtableTableAdminClient.parse_table_path(path)
     assert expected == actual


 def test_common_billing_account_path():
-    billing_account = "cuttlefish"
+    billing_account = "scallop"
     expected = "billingAccounts/{billing_account}".format(
         billing_account=billing_account,
     )
-    actual = BigtableTableAdminClient.common_billing_account_path(billing_account)
+    actual = BaseBigtableTableAdminClient.common_billing_account_path(billing_account)
     assert expected == actual


 def test_parse_common_billing_account_path():
     expected = {
-        "billing_account": "mussel",
+        "billing_account": "abalone",
     }
-    path = BigtableTableAdminClient.common_billing_account_path(**expected)
+    path = BaseBigtableTableAdminClient.common_billing_account_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_common_billing_account_path(path)
+    actual = BaseBigtableTableAdminClient.parse_common_billing_account_path(path)
     assert expected == actual


 def test_common_folder_path():
-    folder = "winkle"
+    folder = "squid"
     expected = "folders/{folder}".format(
         folder=folder,
     )
-    actual = BigtableTableAdminClient.common_folder_path(folder)
+    actual = BaseBigtableTableAdminClient.common_folder_path(folder)
     assert expected == actual


 def test_parse_common_folder_path():
     expected = {
-        "folder": "nautilus",
+        "folder": "clam",
     }
-    path = BigtableTableAdminClient.common_folder_path(**expected)
+    path = BaseBigtableTableAdminClient.common_folder_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_common_folder_path(path)
+    actual = BaseBigtableTableAdminClient.parse_common_folder_path(path)
     assert expected == actual


 def test_common_organization_path():
-    organization = "scallop"
+    organization = "whelk"
     expected = "organizations/{organization}".format(
         organization=organization,
     )
-    actual = BigtableTableAdminClient.common_organization_path(organization)
+    actual = BaseBigtableTableAdminClient.common_organization_path(organization)
     assert expected == actual


 def test_parse_common_organization_path():
     expected = {
-        "organization": "abalone",
+        "organization": "octopus",
     }
-    path = BigtableTableAdminClient.common_organization_path(**expected)
+    path = BaseBigtableTableAdminClient.common_organization_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_common_organization_path(path)
+    actual = BaseBigtableTableAdminClient.parse_common_organization_path(path)
     assert expected == actual


 def test_common_project_path():
-    project = "squid"
+    project = "oyster"
     expected = "projects/{project}".format(
         project=project,
     )
-    actual = BigtableTableAdminClient.common_project_path(project)
+    actual = BaseBigtableTableAdminClient.common_project_path(project)
     assert expected == actual


 def test_parse_common_project_path():
     expected = {
-        "project": "clam",
+        "project": "nudibranch",
     }
-    path = BigtableTableAdminClient.common_project_path(**expected)
+    path = BaseBigtableTableAdminClient.common_project_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_common_project_path(path)
+    actual = BaseBigtableTableAdminClient.parse_common_project_path(path)
     assert expected == actual


 def test_common_location_path():
-    project = "whelk"
-    location = "octopus"
+    project = "cuttlefish"
+    location = "mussel"
     expected = "projects/{project}/locations/{location}".format(
         project=project,
         location=location,
     )
-    actual = BigtableTableAdminClient.common_location_path(project, location)
+    actual = BaseBigtableTableAdminClient.common_location_path(project, location)
     assert expected == actual


 def test_parse_common_location_path():
     expected = {
-        "project": "oyster",
-        "location": "nudibranch",
+        "project": "winkle",
+        "location": "nautilus",
     }
-    path = BigtableTableAdminClient.common_location_path(**expected)
+    path = BaseBigtableTableAdminClient.common_location_path(**expected)

     # Check that the path construction is reversible.
-    actual = BigtableTableAdminClient.parse_common_location_path(path)
+    actual = BaseBigtableTableAdminClient.parse_common_location_path(path)
     assert expected == actual
@@ -15394,7 +29724,7 @@ def test_client_with_default_client_info():
     with mock.patch.object(
         transports.BigtableTableAdminTransport, "_prep_wrapped_messages"
     ) as prep:
-        client = BigtableTableAdminClient(
+        client = BaseBigtableTableAdminClient(
             credentials=ga_credentials.AnonymousCredentials(),
             client_info=client_info,
         )
@@ -15403,7 +29733,7 @@
     with mock.patch.object(
         transports.BigtableTableAdminTransport, "_prep_wrapped_messages"
     ) as prep:
-        transport_class = BigtableTableAdminClient.get_transport_class()
+        transport_class = BaseBigtableTableAdminClient.get_transport_class()
         transport = transport_class(
             credentials=ga_credentials.AnonymousCredentials(),
             client_info=client_info,
@@ -15411,36 +29741,41 @@
         prep.assert_called_once_with(client_info)


+def test_transport_close_grpc():
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "_grpc_channel")), "close"
+    ) as close:
+        with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
 @pytest.mark.asyncio
-async def test_transport_close_async():
-    client = BigtableTableAdminAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="grpc_asyncio",
+async def test_transport_close_grpc_asyncio():
+    client = BaseBigtableTableAdminAsyncClient(
+        credentials=async_anonymous_credentials(), transport="grpc_asyncio"
     )
     with mock.patch.object(
-        type(getattr(client.transport, "grpc_channel")), "close"
+        type(getattr(client.transport, "_grpc_channel")), "close"
     ) as close:
         async with client:
             close.assert_not_called()
         close.assert_called_once()


-def test_transport_close():
-    transports = {
-        "rest": "_session",
-        "grpc": "_grpc_channel",
-    }
-
-    for transport, close_name in transports.items():
-        client = BigtableTableAdminClient(
-            credentials=ga_credentials.AnonymousCredentials(), transport=transport
-        )
-        with mock.patch.object(
-            type(getattr(client.transport, close_name)), "close"
-        ) as close:
-            with client:
-                close.assert_not_called()
-            close.assert_called_once()
+def test_transport_close_rest():
+    client = BaseBigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )
+    with mock.patch.object(
+        type(getattr(client.transport, "_session")), "close"
+    ) as close:
+        with client:
+            close.assert_not_called()
+        close.assert_called_once()


 def test_client_ctx():
@@ -15449,7 +29784,7 @@
         "grpc",
     ]
     for transport in transports:
-        client = BigtableTableAdminClient(
+        client = BaseBigtableTableAdminClient(
             credentials=ga_credentials.AnonymousCredentials(), transport=transport
         )
         # Test client calls underlying transport.
@@ -15463,9 +29798,9 @@ @pytest.mark.parametrize(
     "client_class,transport_class",
     [
-        (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport),
+        (BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport),
         (
-            BigtableTableAdminAsyncClient,
+            BaseBigtableTableAdminAsyncClient,
             transports.BigtableTableAdminGrpcAsyncIOTransport,
         ),
     ],
 )
@@ -15484,7 +29819,9 @@ def test_api_key_credentials(client_class, transport_class):
             patched.assert_called_once_with(
                 credentials=mock_cred,
                 credentials_file=None,
-                host=client.DEFAULT_ENDPOINT,
+                host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+                    UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+                ),
                 scopes=None,
                 client_cert_source_for_mtls=None,
                 quota_project_id=None,
diff --git a/tests/unit/gapic/bigtable_v2/__init__.py b/tests/unit/gapic/bigtable_v2/__init__.py
index 89a37dc92..cbf94b283 100644
--- a/tests/unit/gapic/bigtable_v2/__init__.py
+++ b/tests/unit/gapic/bigtable_v2/__init__.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
+# Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py
index 597540d69..ea7f0955d 100644
--- a/tests/unit/gapic/bigtable_v2/test_bigtable.py
+++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
+# Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,11 +24,12 @@
 import grpc
 from grpc.experimental import aio
-from collections.abc import Iterable
+from collections.abc import Iterable, AsyncIterable
 from google.protobuf import json_format
 import json
 import math
 import pytest
+from google.api_core import api_core_version
 from proto.marshal.rules.dates import DurationRule, TimestampRule
 from proto.marshal.rules import wrappers
 from requests import Response

@@ -36,12 +37,20 @@
 from requests.sessions import Session
 from google.protobuf import json_format

+try:
+    from google.auth.aio import credentials as ga_credentials_async
+
+    HAS_GOOGLE_AUTH_AIO = True
+except ImportError:  # pragma: NO COVER
+    HAS_GOOGLE_AUTH_AIO = False
+
 from google.api_core import client_options
 from google.api_core import exceptions as core_exceptions
 from google.api_core import gapic_v1
 from google.api_core import grpc_helpers
 from google.api_core import grpc_helpers_async
 from google.api_core import path_template
+from google.api_core import retry as retries
 from google.auth import credentials as ga_credentials
 from google.auth.exceptions import MutualTLSChannelError
 from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient
@@ -50,16 +59,40 @@
 from google.cloud.bigtable_v2.types import bigtable
 from google.cloud.bigtable_v2.types import data
 from google.cloud.bigtable_v2.types import request_stats
+from google.cloud.bigtable_v2.types import types
 from google.oauth2 import service_account
 from google.protobuf import duration_pb2  # type: ignore
 from google.protobuf import timestamp_pb2  # type: ignore
+from google.type import date_pb2  # type: ignore
 import google.auth


+CRED_INFO_JSON = {
+    "credential_source": "/path/to/file",
+    "credential_type": "service account credentials",
+    "principal": "service-account@example.com",
+}
+CRED_INFO_STRING = json.dumps(CRED_INFO_JSON)
+
+
+async def mock_async_gen(data, chunk_size=1):
+    for i in range(0, len(data)):  # pragma: NO COVER
+        chunk = data[i : i + chunk_size]
+        yield chunk.encode("utf-8")
+
+
 def client_cert_source_callback():
     return b"cert bytes", b"key bytes"


+# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded.
+# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107.
+def async_anonymous_credentials():
+    if HAS_GOOGLE_AUTH_AIO:
+        return ga_credentials_async.AnonymousCredentials()
+    return ga_credentials.AnonymousCredentials()
+
+
 # If default endpoint is localhost, then default mtls endpoint will be the same.
 # This method modifies the default endpoint so the client can produce a different
 # mtls endpoint for endpoint testing purposes.
@@ -71,6 +104,17 @@ def modify_default_endpoint(client):
     )


+# If default endpoint template is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint template so the client can produce a different
+def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -95,6 +139,320 @@ def test__get_default_mtls_endpoint(): assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi +def test__read_environment_variables(): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BigtableClient._read_environment_variables() == (True, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with pytest.raises(ValueError) as excinfo: + BigtableClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + else: + assert BigtableClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert BigtableClient._read_environment_variables() == (False, "never", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert BigtableClient._read_environment_variables() == (False, "always", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert BigtableClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test_use_client_cert_effective(): + # Test case 1: Test when `should_use_client_cert` returns True. + # We mock the `should_use_client_cert` function to simulate a scenario where + # the google-auth library supports automatic mTLS and determines that a + # client certificate should be used. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch( + "google.auth.transport.mtls.should_use_client_cert", return_value=True + ): + assert BigtableClient._use_client_cert_effective() is True + + # Test case 2: Test when `should_use_client_cert` returns False. + # We mock the `should_use_client_cert` function to simulate a scenario where + # the google-auth library supports automatic mTLS and determines that a + # client certificate should NOT be used. 
+ if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch( + "google.auth.transport.mtls.should_use_client_cert", return_value=False + ): + assert BigtableClient._use_client_cert_effective() is False + + # Test case 3: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "true". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BigtableClient._use_client_cert_effective() is True + + # Test case 4: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "false". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"} + ): + assert BigtableClient._use_client_cert_effective() is False + + # Test case 5: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "True". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "True"}): + assert BigtableClient._use_client_cert_effective() is True + + # Test case 6: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "False". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "False"} + ): + assert BigtableClient._use_client_cert_effective() is False + + # Test case 7: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "TRUE". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "TRUE"}): + assert BigtableClient._use_client_cert_effective() is True + + # Test case 8: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to "FALSE". + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "FALSE"} + ): + assert BigtableClient._use_client_cert_effective() is False + + # Test case 9: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not set. + # In this case, the method should return False, which is the default value. + if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, clear=True): + assert BigtableClient._use_client_cert_effective() is False + + # Test case 10: Test when `should_use_client_cert` is unavailable and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value. + # The method should raise a ValueError as the environment variable must be either + # "true" or "false". 
+ if not hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"} + ): + with pytest.raises(ValueError): + BigtableClient._use_client_cert_effective() + + # Test case 11: Test when `should_use_client_cert` is available and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is set to an invalid value. + # The method should return False as the environment variable is set to an invalid value. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "unsupported"} + ): + assert BigtableClient._use_client_cert_effective() is False + + # Test case 12: Test when `should_use_client_cert` is available and the + # `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is unset. Also, + # the GOOGLE_API_CONFIG environment variable is unset. + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": ""}): + with mock.patch.dict(os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": ""}): + assert BigtableClient._use_client_cert_effective() is False + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BigtableClient._get_client_cert_source(None, False) is None + assert ( + BigtableClient._get_client_cert_source(mock_provided_cert_source, False) is None + ) + assert ( + BigtableClient._get_client_cert_source(mock_provided_cert_source, True) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BigtableClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BigtableClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), +) +@mock.patch.object( + BigtableAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BigtableClient._DEFAULT_UNIVERSE + default_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BigtableClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "always") + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + 
) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "never") + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BigtableClient._get_universe_domain(client_universe_domain, universe_domain_env) + == client_universe_domain + ) + assert ( + BigtableClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BigtableClient._get_universe_domain(None, None) + == BigtableClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BigtableClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "error_code,cred_info_json,show_cred_info", + [ + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), + ], +) +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = BigtableClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] + + +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = BigtableClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + assert error.details == [] + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -201,12 +559,14 @@ def test_bigtable_client_get_transport_class(): ], ) @mock.patch.object( - BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), ) @mock.patch.object( BigtableAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), ) def test_bigtable_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. 
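The `test__get_universe_domain` assertions above pin down a three-level precedence: an explicit client option wins, then the `GOOGLE_CLOUD_UNIVERSE_DOMAIN` environment variable, then the default universe, with an empty string rejected outright. A sketch of that contract under those assumptions; the function here is illustrative, standing in for the private `BigtableClient._get_universe_domain`:

from typing import Optional


def get_universe_domain(
    client_universe: Optional[str],
    env_universe: Optional[str],
    default: str = "googleapis.com",
) -> str:
    # An explicit client option wins, then the environment variable
    # (GOOGLE_CLOUD_UNIVERSE_DOMAIN in the tests above), then the default.
    if client_universe is not None:
        if client_universe == "":
            raise ValueError("Universe Domain cannot be an empty string.")
        return client_universe
    return env_universe if env_universe is not None else default


assert get_universe_domain("foo.com", "bar.com") == "foo.com"
assert get_universe_domain(None, "bar.com") == "bar.com"
assert get_universe_domain(None, None) == "googleapis.com"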
@@ -246,7 +606,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -276,15 +638,12 @@ def test_bigtable_client_client_options(client_class, transport_class, transport # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class(transport=transport_name) - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} - ): - with pytest.raises(ValueError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -294,7 +653,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -312,7 +673,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -344,12 +707,14 @@ def test_bigtable_client_client_options(client_class, transport_class, transport ], ) @mock.patch.object( - BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), ) @mock.patch.object( BigtableAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_bigtable_client_mtls_env_auto( @@ -372,7 +737,9 @@ def test_bigtable_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -404,7 +771,9 @@ def test_bigtable_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -438,7 +807,9 @@ def test_bigtable_client_mtls_env_auto( patched.assert_called_once_with( 
credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -485,6 +856,119 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class): assert api_endpoint == mock_api_endpoint assert cert_source is None + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "Unsupported". + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset. + test_cases = [ + ( + # With workloads present in config, mTLS is enabled. + { + "version": 1, + "cert_configs": { + "workload": { + "cert_path": "path/to/cert/file", + "key_path": "path/to/key/file", + } + }, + }, + mock_client_cert_source, + ), + ( + # With workloads not present in config, mTLS is disabled. + { + "version": 1, + "cert_configs": {}, + }, + None, + ), + ] + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + for config_data, expected_cert_source in test_cases: + env = os.environ.copy() + env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None) + with mock.patch.dict(os.environ, env, clear=True): + config_filename = "mock_certificate_config.json" + config_file_content = json.dumps(config_data) + m = mock.mock_open(read_data=config_file_content) + with mock.patch("builtins.open", m): + with mock.patch.dict( + os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename} + ): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is expected_cert_source + + # Test cases for mTLS enablement when GOOGLE_API_USE_CLIENT_CERTIFICATE is unset(empty). + test_cases = [ + ( + # With workloads present in config, mTLS is enabled. + { + "version": 1, + "cert_configs": { + "workload": { + "cert_path": "path/to/cert/file", + "key_path": "path/to/key/file", + } + }, + }, + mock_client_cert_source, + ), + ( + # With workloads not present in config, mTLS is disabled. 
+ { + "version": 1, + "cert_configs": {}, + }, + None, + ), + ] + if hasattr(google.auth.transport.mtls, "should_use_client_cert"): + for config_data, expected_cert_source in test_cases: + env = os.environ.copy() + env.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") + with mock.patch.dict(os.environ, env, clear=True): + config_filename = "mock_certificate_config.json" + config_file_content = json.dumps(config_data) + m = mock.mock_open(read_data=config_file_content) + with mock.patch("builtins.open", m): + with mock.patch.dict( + os.environ, {"GOOGLE_API_CERTIFICATE_CONFIG": config_filename} + ): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, + api_endpoint=mock_api_endpoint, + ) + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is expected_cert_source + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() @@ -524,6 +1008,101 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class): assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + +@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient]) +@mock.patch.object( + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), +) +@mock.patch.object( + BigtableAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), +) +def test_bigtable_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BigtableClient._DEFAULT_UNIVERSE + default_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -546,7 +1125,9 @@ def test_bigtable_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -581,7 +1162,9 @@ def test_bigtable_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -634,7 +1217,9 @@ def test_bigtable_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -705,27 +1290,121 @@ def test_read_rows(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() + request = bigtable.ReadRowsRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: assert isinstance(message, bigtable.ReadRowsResponse) -def test_read_rows_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_read_rows_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.ReadRowsRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + materialized_view_name="materialized_view_name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - client.read_rows() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.read_rows(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() + assert args[0] == bigtable.ReadRowsRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + materialized_view_name="materialized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_rows_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.read_rows in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.read_rows] = mock_rpc + request = {} + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.read_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_read_rows_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.read_rows + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.read_rows + ] = mock_rpc + + request = {} + await client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.read_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -733,7 +1412,7 @@ async def test_read_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -753,7 +1432,8 @@ async def test_read_rows_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() + request = bigtable.ReadRowsRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -765,64 +1445,21 @@ async def test_read_rows_async_from_dict(): await test_read_rows_async(request_type=dict) -def test_read_rows_routing_parameters(): +def test_read_rows_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. call.return_value = iter([bigtable.ReadRowsResponse()]) - client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - call.return_value = iter([bigtable.ReadRowsResponse()]) - client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - -def test_read_rows_flattened(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.ReadRowsResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.read_rows( - table_name="table_name_value", - app_profile_id="app_profile_id_value", - ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_rows( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -854,7 +1491,7 @@ def test_read_rows_flattened_error(): @pytest.mark.asyncio async def test_read_rows_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -885,7 +1522,7 @@ async def test_read_rows_flattened_async(): @pytest.mark.asyncio async def test_read_rows_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -924,27 +1561,123 @@ def test_sample_row_keys(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() + request = bigtable.SampleRowKeysRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: assert isinstance(message, bigtable.SampleRowKeysResponse) -def test_sample_row_keys_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_sample_row_keys_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.SampleRowKeysRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + materialized_view_name="materialized_view_name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - client.sample_row_keys() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.sample_row_keys(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() + assert args[0] == bigtable.SampleRowKeysRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + materialized_view_name="materialized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_sample_row_keys_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.sample_row_keys in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.sample_row_keys] = mock_rpc + request = {} + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.sample_row_keys(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_sample_row_keys_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.sample_row_keys + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.sample_row_keys + ] = mock_rpc + + request = {} + await client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.sample_row_keys(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -952,7 +1685,7 @@ async def test_sample_row_keys_async( transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -972,7 +1705,8 @@ async def test_sample_row_keys_async( # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() + request = bigtable.SampleRowKeysRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -984,49 +1718,6 @@ async def test_sample_row_keys_async_from_dict(): await test_sample_row_keys_async(request_type=dict) -def test_sample_row_keys_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_sample_row_keys_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -1073,7 +1764,7 @@ def test_sample_row_keys_flattened_error(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1104,7 +1795,7 @@ async def test_sample_row_keys_flattened_async(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1143,26 +1834,118 @@ def test_mutate_row(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() + request = bigtable.MutateRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.MutateRowResponse) -def test_mutate_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
+def test_mutate_row_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.MutateRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - client.mutate_row() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.mutate_row(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() + assert args[0] == bigtable.MutateRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_row_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_row in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_row] = mock_rpc + request = {} + client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_mutate_row_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.mutate_row + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.mutate_row + ] = mock_rpc + + request = {} + await client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + await client.mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1170,7 +1953,7 @@ async def test_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1189,7 +1972,8 @@ async def test_mutate_row_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() + request = bigtable.MutateRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.MutateRowResponse) @@ -1200,49 +1984,6 @@ async def test_mutate_row_async_from_dict(): await test_mutate_row_async(request_type=dict) -def test_mutate_row_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - call.return_value = bigtable.MutateRowResponse() - client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - call.return_value = bigtable.MutateRowResponse() - client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_mutate_row_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -1311,7 +2052,7 @@ def test_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_mutate_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1360,7 +2101,7 @@ async def test_mutate_row_flattened_async(): @pytest.mark.asyncio async def test_mutate_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1405,27 +2146,121 @@ def test_mutate_rows(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() + request = bigtable.MutateRowsRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: assert isinstance(message, bigtable.MutateRowsResponse) -def test_mutate_rows_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_mutate_rows_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.MutateRowsRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - client.mutate_rows() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.mutate_rows(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() + assert args[0] == bigtable.MutateRowsRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_rows_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_rows in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_rows] = mock_rpc + request = {} + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.mutate_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_mutate_rows_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.mutate_rows + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.mutate_rows + ] = mock_rpc + + request = {} + await client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.mutate_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1433,7 +2268,7 @@ async def test_mutate_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1453,7 +2288,8 @@ async def test_mutate_rows_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() + request = bigtable.MutateRowsRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -1465,65 +2301,22 @@ async def test_mutate_rows_async_from_dict(): await test_mutate_rows_async(request_type=dict) -def test_mutate_rows_routing_parameters(): +def test_mutate_rows_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. call.return_value = iter([bigtable.MutateRowsResponse()]) - client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - call.return_value = iter([bigtable.MutateRowsResponse()]) - client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - -def test_mutate_rows_flattened(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.MutateRowsResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.mutate_rows( - table_name="table_name_value", - entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], - app_profile_id="app_profile_id_value", - ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_rows( + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1559,7 +2352,7 @@ def test_mutate_rows_flattened_error(): @pytest.mark.asyncio async def test_mutate_rows_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1594,7 +2387,7 @@ async def test_mutate_rows_flattened_async(): @pytest.mark.asyncio async def test_mutate_rows_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1638,29 +2431,127 @@ def test_check_and_mutate_row(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() + request = bigtable.CheckAndMutateRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.CheckAndMutateRowResponse) assert response.predicate_matched is True -def test_check_and_mutate_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_check_and_mutate_row_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = bigtable.CheckAndMutateRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.check_and_mutate_row), "__call__" ) as call: - client.check_and_mutate_row() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.check_and_mutate_row(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() + assert args[0] == bigtable.CheckAndMutateRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_check_and_mutate_row_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.check_and_mutate_row in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.check_and_mutate_row + ] = mock_rpc + request = {} + client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.check_and_mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.check_and_mutate_row + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.check_and_mutate_row + ] = mock_rpc + + request = {} + await client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. 
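+        # (mock_rpc is an AsyncMock, so each awaited client call is recorded
+        # once in call_count)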
+ assert mock_rpc.call_count == 1 + + await client.check_and_mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1668,7 +2559,7 @@ async def test_check_and_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1691,7 +2582,8 @@ async def test_check_and_mutate_row_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() + request = bigtable.CheckAndMutateRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.CheckAndMutateRowResponse) @@ -1703,53 +2595,6 @@ async def test_check_and_mutate_row_async_from_dict(): await test_check_and_mutate_row_async(request_type=dict) -def test_check_and_mutate_row_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), "__call__" - ) as call: - call.return_value = bigtable.CheckAndMutateRowResponse() - client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), "__call__" - ) as call: - call.return_value = bigtable.CheckAndMutateRowResponse() - client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_check_and_mutate_row_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -1870,7 +2715,7 @@ def test_check_and_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1955,7 +2800,7 @@ async def test_check_and_mutate_row_flattened_async(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2016,26 +2861,118 @@ def test_ping_and_warm(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() + request = bigtable.PingAndWarmRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.PingAndWarmResponse) -def test_ping_and_warm_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_ping_and_warm_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.PingAndWarmRequest( + name="name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: - client.ping_and_warm() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.ping_and_warm(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() + assert args[0] == bigtable.PingAndWarmRequest( + name="name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_ping_and_warm_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.ping_and_warm in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.ping_and_warm] = mock_rpc + request = {} + client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. 
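+        # (wrapper_fn was reset after client construction, so the zero count
+        # asserted below proves the rpc came from the _wrapped_methods cache
+        # rather than being wrapped again per call)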
+ assert mock_rpc.call_count == 1 + + client.ping_and_warm(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_ping_and_warm_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.ping_and_warm + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.ping_and_warm + ] = mock_rpc + + request = {} + await client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.ping_and_warm(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2043,7 +2980,7 @@ async def test_ping_and_warm_async( transport: str = "grpc_asyncio", request_type=bigtable.PingAndWarmRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2062,7 +2999,8 @@ async def test_ping_and_warm_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() + request = bigtable.PingAndWarmRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.PingAndWarmResponse) @@ -2073,49 +3011,6 @@ async def test_ping_and_warm_async_from_dict(): await test_ping_and_warm_async(request_type=dict) -def test_ping_and_warm_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.PingAndWarmRequest( - **{"name": "projects/sample1/instances/sample2"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: - call.return_value = bigtable.PingAndWarmResponse() - client.ping_and_warm(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: - call.return_value = bigtable.PingAndWarmResponse() - client.ping_and_warm(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_ping_and_warm_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -2162,7 +3057,7 @@ def test_ping_and_warm_flattened_error(): @pytest.mark.asyncio async def test_ping_and_warm_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2195,7 +3090,7 @@ async def test_ping_and_warm_flattened_async(): @pytest.mark.asyncio async def test_ping_and_warm_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2236,28 +3131,127 @@ def test_read_modify_write_row(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() + request = bigtable.ReadModifyWriteRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.ReadModifyWriteRowResponse) -def test_read_modify_write_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_read_modify_write_row_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.ReadModifyWriteRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.read_modify_write_row), "__call__" ) as call: - client.read_modify_write_row() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
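+            # (harmless for Bigtable; shared test templates read `.name` on
+            # operation-style responses, so a plain string is supplied)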
+ ) + client.read_modify_write_row(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() + assert args[0] == bigtable.ReadModifyWriteRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_modify_write_row_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.read_modify_write_row + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.read_modify_write_row + ] = mock_rpc + request = {} + client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_modify_write_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_read_modify_write_row_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.read_modify_write_row + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.read_modify_write_row + ] = mock_rpc + + request = {} + await client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.read_modify_write_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2265,7 +3259,7 @@ async def test_read_modify_write_row_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2286,7 +3280,8 @@ async def test_read_modify_write_row_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() + request = bigtable.ReadModifyWriteRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, bigtable.ReadModifyWriteRowResponse) @@ -2297,54 +3292,7 @@ async def test_read_modify_write_row_async_from_dict(): await test_read_modify_write_row_async(request_type=dict) -def test_read_modify_write_row_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), "__call__" - ) as call: - call.return_value = bigtable.ReadModifyWriteRowResponse() - client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), "__call__" - ) as call: - call.return_value = bigtable.ReadModifyWriteRowResponse() - client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - -def test_read_modify_write_row_flattened(): +def test_read_modify_write_row_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2402,7 +3350,7 @@ def test_read_modify_write_row_flattened_error(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2445,7 +3393,7 @@ async def test_read_modify_write_row_flattened_async(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2492,7 +3440,8 @@ def test_generate_initial_change_stream_partitions( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: @@ -2501,22 +3450,118 @@ def test_generate_initial_change_stream_partitions( ) -def test_generate_initial_change_stream_partitions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
+def test_generate_initial_change_stream_partitions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.GenerateInitialChangeStreamPartitionsRequest( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.generate_initial_change_stream_partitions), "__call__" ) as call: - client.generate_initial_change_stream_partitions() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.generate_initial_change_stream_partitions(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_generate_initial_change_stream_partitions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.generate_initial_change_stream_partitions + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_initial_change_stream_partitions + ] = mock_rpc + request = {} + client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. 
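+        # (server-streaming rpcs go through the same _wrapped_methods cache as
+        # unary ones, so the reuse assertions below are identical)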
+ assert mock_rpc.call_count == 1 + + client.generate_initial_change_stream_partitions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.generate_initial_change_stream_partitions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.generate_initial_change_stream_partitions + ] = mock_rpc + + request = {} + await client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.generate_initial_change_stream_partitions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2525,7 +3570,7 @@ async def test_generate_initial_change_stream_partitions_async( request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2547,7 +3592,8 @@ async def test_generate_initial_change_stream_partitions_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -2595,7 +3641,7 @@ def test_generate_initial_change_stream_partitions_field_headers(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_field_headers_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2677,7 +3723,7 @@ def test_generate_initial_change_stream_partitions_flattened_error(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2712,7 +3758,7 @@ async def test_generate_initial_change_stream_partitions_flattened_async(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2753,29 +3799,125 @@ def test_read_change_stream(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() + request = bigtable.ReadChangeStreamRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: assert isinstance(message, bigtable.ReadChangeStreamResponse) -def test_read_change_stream_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. +def test_read_change_stream_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.ReadChangeStreamRequest( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.read_change_stream), "__call__" ) as call: - client.read_change_stream() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.read_change_stream(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() + assert args[0] == bigtable.ReadChangeStreamRequest( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_change_stream_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.read_change_stream in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.read_change_stream + ] = mock_rpc + request = {} + client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. 
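+        # (the client coerces the plain dict into a ReadChangeStreamRequest
+        # before invoking the wrapped rpc, i.e. the mock installed above)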
+ assert mock_rpc.call_count == 1 + + client.read_change_stream(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_read_change_stream_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.read_change_stream + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.read_change_stream + ] = mock_rpc + + request = {} + await client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.read_change_stream(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2783,7 +3925,7 @@ async def test_read_change_stream_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadChangeStreamRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2805,7 +3947,8 @@ async def test_read_change_stream_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() + request = bigtable.ReadChangeStreamRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -2851,7 +3994,7 @@ def test_read_change_stream_field_headers(): @pytest.mark.asyncio async def test_read_change_stream_field_headers_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2931,7 +4074,7 @@ def test_read_change_stream_flattened_error(): @pytest.mark.asyncio async def test_read_change_stream_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2964,7 +4107,7 @@ async def test_read_change_stream_flattened_async(): @pytest.mark.asyncio async def test_read_change_stream_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2980,442 +4123,5110 @@ async def test_read_change_stream_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - bigtable.ReadRowsRequest, + bigtable.PrepareQueryRequest, dict, ], ) -def test_read_rows_rest(request_type): +def test_prepare_query(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse( - last_scanned_row_key=b"last_scanned_row_key_blob", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", ) + response = client.prepare_query(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable.PrepareQueryRequest() + assert args[0] == request - json_return_value = "[{}]".format(json_return_value) + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PrepareQueryResponse) + assert response.prepared_query == b"prepared_query_blob" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_rows(request) - assert isinstance(response, Iterable) - response = next(response) +def test_prepare_query_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ReadRowsResponse) - assert response.last_scanned_row_key == b"last_scanned_row_key_blob" + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = bigtable.PrepareQueryRequest( + instance_name="instance_name_value", + app_profile_id="app_profile_id_value", + query="query_value", + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.prepare_query(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.PrepareQueryRequest( + instance_name="instance_name_value", + app_profile_id="app_profile_id_value", + query="query_value", + ) -def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): - transport_class = transports.BigtableRestTransport - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, +def test_prepare_query_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - ) - # verify fields with default values are dropped + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).read_rows._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Ensure method has been cached + assert client._transport.prepare_query in client._transport._wrapped_methods - # verify required fields with default values are now present + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.prepare_query] = mock_rpc + request = {} + client.prepare_query(request) - jsonified_request["tableName"] = "table_name_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).read_rows._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + client.prepare_query(request) - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result +@pytest.mark.asyncio +async def test_prepare_query_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value = Response() - response_value.status_code = 200 + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - pb_return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - json_return_value = "[{}]".format(json_return_value) + # Ensure method has been cached + assert ( + client._client._transport.prepare_query + in client._client._transport._wrapped_methods + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.prepare_query + ] = mock_rpc - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_rows(request) + request = {} + await client.prepare_query(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + await client.prepare_query(request) -def test_read_rows_rest_unset_required_fields(): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - unset_fields = transport.read_rows._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName",))) +@pytest.mark.asyncio +async def test_prepare_query_async( + transport: str = "grpc_asyncio", request_type=bigtable.PrepareQueryRequest +): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_rows_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_rows" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_rows" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadRowsResponse.to_json( - bigtable.ReadRowsResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) ) - req.return_value._content = "[{}]".format(req.return_value._content) + response = await client.prepare_query(request) - request = bigtable.ReadRowsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadRowsResponse() + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable.PrepareQueryRequest() + assert args[0] == request - client.read_rows( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) + # Establish that the response is the type that we expect. 
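+    # (grpc_helpers_async.FakeUnaryUnaryCall wraps the response in an awaitable
+    # call object, so awaiting the client method yields it directly)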
+ assert isinstance(response, bigtable.PrepareQueryResponse) + assert response.prepared_query == b"prepared_query_blob" - pre.assert_called_once() - post.assert_called_once() +@pytest.mark.asyncio +async def test_prepare_query_async_from_dict(): + await test_prepare_query_async(request_type=dict) -def test_read_rows_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadRowsRequest -): + +def test_prepare_query_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.PrepareQueryResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.prepare_query( + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_rows(request) + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].instance_name + mock_val = "instance_name_value" + assert arg == mock_val + arg = args[0].query + mock_val = "query_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val -def test_read_rows_rest_flattened(): +def test_prepare_query_flattened_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", + # Attempting to call a method with both a request object and flattened + # fields is an error. 
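+    # (supplying request= together with flattened keyword arguments is
+    # ambiguous, so the generated client raises ValueError instead of merging)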
+    with pytest.raises(ValueError):
+        client.prepare_query(
+            bigtable.PrepareQueryRequest(),
+            instance_name="instance_name_value",
+            query="query_value",
+            app_profile_id="app_profile_id_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_prepare_query_flattened_async():
+    client = BigtableAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.prepare_query), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            bigtable.PrepareQueryResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.prepare_query(
+            instance_name="instance_name_value",
+            query="query_value",
+            app_profile_id="app_profile_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].instance_name
+        mock_val = "instance_name_value"
+        assert arg == mock_val
+        arg = args[0].query
+        mock_val = "query_value"
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = "app_profile_id_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_prepare_query_flattened_error_async():
+    client = BigtableAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
with pytest.raises(ValueError): - client.read_rows( - bigtable.ReadRowsRequest(), - table_name="table_name_value", + await client.prepare_query( + bigtable.PrepareQueryRequest(), + instance_name="instance_name_value", + query="query_value", app_profile_id="app_profile_id_value", ) -def test_read_rows_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - @pytest.mark.parametrize( "request_type", [ - bigtable.SampleRowKeysRequest, + bigtable.ExecuteQueryRequest, dict, ], ) -def test_sample_row_keys_rest(request_type): +def test_execute_query(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse( - row_key=b"row_key_blob", - offset_bytes=1293, + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + response = client.execute_query(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable.ExecuteQueryRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.ExecuteQueryResponse) + + +def test_execute_query_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.ExecuteQueryRequest( + instance_name="instance_name_value", + app_profile_id="app_profile_id_value", + query="query_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.execute_query(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ExecuteQueryRequest( + instance_name="instance_name_value", + app_profile_id="app_profile_id_value", + query="query_value", ) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - json_return_value = "[{}]".format(json_return_value) +def test_execute_query_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.sample_row_keys(request) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - assert isinstance(response, Iterable) - response = next(response) + # Ensure method has been cached + assert client._transport.execute_query in client._transport._wrapped_methods - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.SampleRowKeysResponse) - assert response.row_key == b"row_key_blob" - assert response.offset_bytes == 1293 + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.execute_query] = mock_rpc + request = {} + client.execute_query(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.execute_query(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_sample_row_keys_rest_required_fields( - request_type=bigtable.SampleRowKeysRequest, + +@pytest.mark.asyncio +async def test_execute_query_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.execute_query + in client._client._transport._wrapped_methods ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.execute_query + ] = mock_rpc + + request = {} + await client.execute_query(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.execute_query(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_execute_query_async( + transport: str = "grpc_asyncio", request_type=bigtable.ExecuteQueryRequest +): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).sample_row_keys._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + response = await client.execute_query(request) - # verify required fields with default values are now present + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable.ExecuteQueryRequest() + assert args[0] == request - jsonified_request["tableName"] = "table_name_value" + # Establish that the response is the type that we expect. 
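+        # (aio.UnaryStreamCall exposes an async read() that returns one streamed
+        # message per call, matching the AsyncMock side_effect set above.)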
+        message = await response.read()
+        assert isinstance(message, bigtable.ExecuteQueryResponse)
-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).sample_row_keys._get_unset_required_fields(jsonified_request)
-    # Check that path parameters and body parameters are not mixing in.
-    assert not set(unset_fields) - set(("app_profile_id",))
+
+@pytest.mark.asyncio
+async def test_execute_query_async_from_dict():
+    await test_execute_query_async(request_type=dict)
+
+
+def test_execute_query_flattened():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.execute_query), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iter([bigtable.ExecuteQueryResponse()])
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.execute_query(
+            instance_name="instance_name_value",
+            query="query_value",
+            app_profile_id="app_profile_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].instance_name
+        mock_val = "instance_name_value"
+        assert arg == mock_val
+        arg = args[0].query
+        mock_val = "query_value"
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = "app_profile_id_value"
+        assert arg == mock_val
+
+
+def test_execute_query_flattened_error():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.execute_query(
+            bigtable.ExecuteQueryRequest(),
+            instance_name="instance_name_value",
+            query="query_value",
+            app_profile_id="app_profile_id_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_execute_query_flattened_async():
+    client = BigtableAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.execute_query), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.execute_query(
+            instance_name="instance_name_value",
+            query="query_value",
+            app_profile_id="app_profile_id_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].instance_name
+        mock_val = "instance_name_value"
+        assert arg == mock_val
+        arg = args[0].query
+        mock_val = "query_value"
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = "app_profile_id_value"
+        assert arg == mock_val
+
+
+@pytest.mark.asyncio
+async def test_execute_query_flattened_error_async():
+    client = BigtableAsyncClient(
+        credentials=async_anonymous_credentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.execute_query( + bigtable.ExecuteQueryRequest(), + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_rows_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.read_rows in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.read_rows] = mock_rpc + + request = {} + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_read_rows_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.read_rows(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readRows" + % client.transport._host, + args[1], + ) + + +def test_read_rows_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
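+    # (The ValueError is raised client-side, before any HTTP request is made,
+    # which is why this test needs no transport mock.)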
+ with pytest.raises(ValueError): + client.read_rows( + bigtable.ReadRowsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_sample_row_keys_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.sample_row_keys in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.sample_row_keys] = mock_rpc + + request = {} + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.sample_row_keys(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_sample_row_keys_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.SampleRowKeysResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.sample_row_keys(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" + % client.transport._host, + args[1], + ) + + +def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_row_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_row in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_row] = mock_rpc + + request = {} + client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["row_key"] = b"" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["rowKey"] = b"row_key_blob" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
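+            # (The stubbed transcode_result below mirrors the dict shape that
+            # transcode() normally returns: uri, method, query_params, and body.)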
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.mutate_row(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_mutate_row_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.mutate_row._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "rowKey", + "mutations", + ) + ) + ) + + +def test_mutate_row_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.mutate_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" + % client.transport._host, + args[1], + ) + + +def test_mutate_row_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.mutate_row( + bigtable.MutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_rows_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_rows in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_rows] = mock_rpc + + request = {} + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.mutate_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
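+            # (MutateRows is server-streaming over REST, so the JSON payload is
+            # wrapped in a list and consumed through iter_content further down.)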
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.mutate_rows(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_mutate_rows_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.mutate_rows._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("entries",))) + + +def test_mutate_rows_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.mutate_rows(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" + % client.transport._host, + args[1], + ) + + +def test_mutate_rows_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + +def test_check_and_mutate_row_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.check_and_mutate_row in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.check_and_mutate_row + ] = mock_rpc + + request = {} + client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.check_and_mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_check_and_mutate_row_rest_required_fields( + request_type=bigtable.CheckAndMutateRowRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["row_key"] = b"" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).check_and_mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["rowKey"] = b"row_key_blob" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).check_and_mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.CheckAndMutateRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
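+            # (CheckAndMutateRow is unary, so unlike the streaming RPCs above
+            # the JSON payload below is not wrapped in a list.)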
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.check_and_mutate_row(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_check_and_mutate_row_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.check_and_mutate_row._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("rowKey",))) + + +def test_check_and_mutate_row_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.CheckAndMutateRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.check_and_mutate_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" + % client.transport._host, + args[1], + ) + + +def test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +def test_ping_and_warm_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.ping_and_warm in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.ping_and_warm] = mock_rpc + + request = {} + client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.ping_and_warm(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).ping_and_warm._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).ping_and_warm._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
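+        # (PingAndWarm is instance-scoped: only `name` is required here, unlike
+        # the table-scoped RPCs above.)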
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.ping_and_warm(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_ping_and_warm_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.ping_and_warm._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_ping_and_warm_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.ping_and_warm(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*}:ping" % client.transport._host, args[1] + ) + + +def test_ping_and_warm_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.ping_and_warm( + bigtable.PingAndWarmRequest(), + name="name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_modify_write_row_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.read_modify_write_row + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.read_modify_write_row + ] = mock_rpc + + request = {} + client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_modify_write_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_read_modify_write_row_rest_required_fields( + request_type=bigtable.ReadModifyWriteRowRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["row_key"] = b"" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_modify_write_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["rowKey"] = b"row_key_blob" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_modify_write_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.read_modify_write_row(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_read_modify_write_row_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.read_modify_write_row._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "rowKey", + "rules", + ) + ) + ) + + +def test_read_modify_write_row_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.read_modify_write_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" + % client.transport._host, + args[1], + ) + + +def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + +def test_generate_initial_change_stream_partitions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.generate_initial_change_stream_partitions + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_initial_change_stream_partitions + ] = mock_rpc + + request = {} + client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.generate_initial_change_stream_partitions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_generate_initial_change_stream_partitions_rest_required_fields( + request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_initial_change_stream_partitions._get_unset_required_fields( + jsonified_request + ) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_initial_change_stream_partitions._get_unset_required_fields( + jsonified_request + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.generate_initial_change_stream_partitions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.generate_initial_change_stream_partitions._get_unset_required_fields( + {} + ) + ) + assert set(unset_fields) == (set(()) & set(("tableName",))) + + +def test_generate_initial_change_stream_partitions_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.generate_initial_change_stream_partitions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
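+        # (path_template.validate checks the concrete request URI against the
+        # http rule's URI pattern, host included.)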
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions" + % client.transport._host, + args[1], + ) + + +def test_generate_initial_change_stream_partitions_rest_flattened_error( + transport: str = "rest", +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_initial_change_stream_partitions( + bigtable.GenerateInitialChangeStreamPartitionsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_change_stream_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.read_change_stream in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.read_change_stream + ] = mock_rpc + + request = {} + client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_change_stream(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_read_change_stream_rest_required_fields( + request_type=bigtable.ReadChangeStreamRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_change_stream._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_change_stream._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadChangeStreamResponse() + # Mock the http request call within the method and fake a response. 
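+    # (Patching Session.request at the class level intercepts the transport's
+    # underlying HTTP session.)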
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.read_change_stream(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_read_change_stream_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.read_change_stream._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("tableName",))) + + +def test_read_change_stream_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadChangeStreamResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.read_change_stream(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" + % client.transport._host, + args[1], + ) + + +def test_read_change_stream_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_change_stream( + bigtable.ReadChangeStreamRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_prepare_query_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.prepare_query in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.prepare_query] = mock_rpc + + request = {} + client.prepare_query(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.prepare_query(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_prepare_query_rest_required_fields(request_type=bigtable.PrepareQueryRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["instance_name"] = "" + request_init["query"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).prepare_query._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["instanceName"] = "instance_name_value" + jsonified_request["query"] = "query_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).prepare_query._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "instanceName" in jsonified_request + assert jsonified_request["instanceName"] == "instance_name_value" + assert "query" in jsonified_request + assert jsonified_request["query"] == "query_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.PrepareQueryResponse() + # Mock the http request call within the method and fake a response. 
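+    # Note: unlike the streaming methods above, prepare_query is unary, so the
+    # mocked response body stays a single JSON object (no array wrapping and
+    # no iter_content).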
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.PrepareQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.prepare_query(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_prepare_query_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.prepare_query._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "instanceName", + "query", + "paramTypes", + ) + ) + ) + + +def test_prepare_query_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PrepareQueryResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"instance_name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.PrepareQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.prepare_query(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{instance_name=projects/*/instances/*}:prepareQuery" + % client.transport._host, + args[1], + ) + + +def test_prepare_query_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.prepare_query(
+            bigtable.PrepareQueryRequest(),
+            instance_name="instance_name_value",
+            query="query_value",
+            app_profile_id="app_profile_id_value",
+        )
+
+
+def test_execute_query_rest_use_cached_wrapped_rpc():
+    # Clients should use _prep_wrapped_messages to create cached wrapped rpcs,
+    # instead of constructing them on each call
+    with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn:
+        client = BigtableClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport="rest",
+        )
+
+        # Should wrap all calls on client creation
+        assert wrapper_fn.call_count > 0
+        wrapper_fn.reset_mock()
+
+        # Ensure method has been cached
+        assert client._transport.execute_query in client._transport._wrapped_methods
+
+        # Replace cached wrapped function with mock
+        mock_rpc = mock.Mock()
+        mock_rpc.return_value.name = (
+            "foo"  # operation_request.operation in compute client(s) expect a string.
+        )
+        client._transport._wrapped_methods[client._transport.execute_query] = mock_rpc
+
+        request = {}
+        client.execute_query(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert mock_rpc.call_count == 1
+
+        client.execute_query(request)
+
+        # Establish that a new wrapper was not created for this call
+        assert wrapper_fn.call_count == 0
+        assert mock_rpc.call_count == 2
+
+
+def test_execute_query_rest_required_fields(request_type=bigtable.ExecuteQueryRequest):
+    transport_class = transports.BigtableRestTransport
+
+    request_init = {}
+    request_init["instance_name"] = ""
+    request_init["query"] = ""
+    request = request_type(**request_init)
+    pb_request = request_type.pb(request)
+    jsonified_request = json.loads(
+        json_format.MessageToJson(pb_request, use_integers_for_enums=False)
+    )
+
+    # verify fields with default values are dropped
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).execute_query._get_unset_required_fields(jsonified_request)
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with default values are now present
+
+    jsonified_request["instanceName"] = "instance_name_value"
+    jsonified_request["query"] = "query_value"
+
+    unset_fields = transport_class(
+        credentials=ga_credentials.AnonymousCredentials()
+    ).execute_query._get_unset_required_fields(jsonified_request)
     jsonified_request.update(unset_fields)

-    # verify required fields with non-default values are left alone
-    assert "tableName" in jsonified_request
-    assert jsonified_request["tableName"] == "table_name_value"
+    # verify required fields with non-default values are left alone
+    assert "instanceName" in jsonified_request
+    assert jsonified_request["instanceName"] == "instance_name_value"
+    assert "query" in jsonified_request
+    assert jsonified_request["query"] == "query_value"
+
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+    request = request_type(**request_init)
+
+    # Designate an appropriate value for the returned response.
+    return_value = bigtable.ExecuteQueryResponse()
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, "request") as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
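+        # (path_template.transcode normally maps a request message onto an
+        # HTTP method, URI, body, and query params according to the method's
+        # http_options; the stub below returns a fixed result instead.)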
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.ExecuteQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.execute_query(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_execute_query_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.execute_query._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "instanceName", + "query", + "params", + ) + ) + ) + + +def test_execute_query_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ExecuteQueryResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"instance_name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ExecuteQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.execute_query(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{instance_name=projects/*/instances/*}:executeQuery" + % client.transport._host, + args[1], + ) + + +def test_execute_query_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.execute_query( + bigtable.ExecuteQueryRequest(), + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + transports.BigtableRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
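+    # google.auth.default() implements Application Default Credentials and
+    # returns a (credentials, project_id) tuple; constructing a transport
+    # without explicit credentials should trigger exactly one such lookup.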
+ with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = BigtableClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_rows_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_sample_row_keys_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_mutate_row_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_mutate_rows_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
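+# With request=None the client is expected to build an empty request message
+# of the method's request type, which is what args[0] is compared against.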
+def test_check_and_mutate_row_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_ping_and_warm_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value = bigtable.PingAndWarmResponse() + client.ping_and_warm(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_modify_write_row_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_initial_change_stream_partitions_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + call.return_value = iter( + [bigtable.GenerateInitialChangeStreamPartitionsResponse()] + ) + client.generate_initial_change_stream_partitions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.GenerateInitialChangeStreamPartitionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_change_stream_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + call.return_value = iter([bigtable.ReadChangeStreamResponse()]) + client.read_change_stream(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadChangeStreamRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_prepare_query_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value = bigtable.PrepareQueryResponse() + client.prepare_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_execute_query_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest() + + assert args[0] == request_msg + + +def test_read_rows_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_rows_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
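+        # (The assertions below parse the "x-goog-request-params" metadata
+        # entry, which the client populates from the request's routing fields
+        # as "key=value" pairs.)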
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_rows_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_rows_routing_parameters_request_4_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_sample_row_keys_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_sample_row_keys_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_sample_row_keys_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_sample_row_keys_routing_parameters_request_4_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_mutate_row_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_mutate_row_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_mutate_row_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_mutate_rows_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_mutate_rows_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_mutate_rows_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_check_and_mutate_row_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_check_and_mutate_row_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_check_and_mutate_row_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_ping_and_warm_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value = bigtable.PingAndWarmResponse() + client.ping_and_warm(request={"name": "projects/sample1/instances/sample2"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest( + **{"name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_ping_and_warm_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value = bigtable.PingAndWarmResponse() + client.ping_and_warm(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_modify_write_row_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_modify_write_row_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"app_profile_id": "sample1"} + ) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_modify_write_row_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_prepare_query_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value = bigtable.PrepareQueryResponse() + client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_prepare_query_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value = bigtable.PrepareQueryResponse() + client.prepare_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_execute_query_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_execute_query_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_transport_kind_grpc_asyncio(): + transport = BigtableAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_read_rows_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_sample_row_keys_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_mutate_row_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + await client.mutate_row(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_mutate_rows_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + await client.mutate_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_check_and_mutate_row_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + await client.check_and_mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_ping_and_warm_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + await client.ping_and_warm(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_read_modify_write_row_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
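+        # (FakeUnaryUnaryCall wraps the response in an awaitable that mimics
+        # a grpc.aio unary call object, so awaiting the mocked RPC resolves
+        # to the given message.)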
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + await client.read_modify_write_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()] + ) + await client.generate_initial_change_stream_partitions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.GenerateInitialChangeStreamPartitionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_read_change_stream_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadChangeStreamResponse()] + ) + await client.read_change_stream(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadChangeStreamRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_prepare_query_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + await client.prepare_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
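+# With request=None the client should fall back to sending a
+# default-constructed request message.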
+@pytest.mark.asyncio +async def test_execute_query_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + await client.execute_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest() + + assert args[0] == request_msg + + +@pytest.mark.asyncio +async def test_read_rows_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_read_rows_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_read_rows_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
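+    # Routing-parameter tests check that request fields are reflected into
+    # the x-goog-request-params metadata header.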
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_read_rows_routing_parameters_request_4_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_sample_row_keys_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. 
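+    # table_name is echoed into the routing header; app_profile_id defaults
+    # to the empty string when unset.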
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_sample_row_keys_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_sample_row_keys_routing_parameters_request_4_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. 
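+        # Server-streaming methods are faked with a UnaryStreamCall mock
+        # whose read() yields a single response message.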
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_mutate_row_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + await client.mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_mutate_row_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + await client.mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_mutate_row_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
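+    # Only authorized_view_name is set; routing is expected to derive the
+    # parent table_name from it.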
+ with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + await client.mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_mutate_rows_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + await client.mutate_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_mutate_rows_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + await client.mutate_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
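+    # With only app_profile_id set, it should be the sole routing parameter.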
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + await client.mutate_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + await client.check_and_mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
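+        # The response body is irrelevant here; only the request routing
+        # metadata is asserted below.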
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + await client.check_and_mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + await client.check_and_mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_ping_and_warm_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + await client.ping_and_warm( + request={"name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest( + **{"name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_ping_and_warm_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
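+    # ping_and_warm routes on the instance name rather than a table name;
+    # here only the app profile id is supplied.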
+ with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + await client.ping_and_warm(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + await client.read_modify_write_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + await client.read_modify_write_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"app_profile_id": "sample1"} + ) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
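+    # Same pattern as the mutate cases above: an authorized view routes via
+    # its parent table.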
+ with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + await client.read_modify_write_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + await client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_prepare_query_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + await client.prepare_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
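+    # No instance_name is given, so only the profile id should appear in the
+    # routing header.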
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + await client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_execute_query_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + await client.execute_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
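+    # next(iter(...)) below pulls the first x-goog-request-params entry out
+    # of the metadata passed to the stub.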
+    call.assert_called()
+    _, args, kw = call.mock_calls[0]
+    request_msg = bigtable.ExecuteQueryRequest(**{"app_profile_id": "sample1"})
+
+    assert args[0] == request_msg
+
+    expected_headers = {"app_profile_id": "sample1"}
+    # assert the expected headers are present, in any order
+    routing_string = next(
+        iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"])
+    )
+    assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])
+
+
+def test_transport_kind_rest():
+    transport = BigtableClient.get_transport_class("rest")(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+    assert transport.kind == "rest"
+
+
+def test_read_rows_rest_bad_request(request_type=bigtable.ReadRowsRequest):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )
+    # send a request that will satisfy transcoding
+    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
     request = request_type(**request_init)
-    # Designate an appropriate value for the returned response.
-    return_value = bigtable.SampleRowKeysResponse()
-    # Mock the http request call within the method and fake a response.
-    with mock.patch.object(Session, "request") as req:
-        # We need to mock transcode() because providing default values
-        # for required fields will fail the real version if the http_options
-        # expect actual values for those fields.
-        with mock.patch.object(path_template, "transcode") as transcode:
-            # A uri without fields and an empty body will force all the
-            # request fields to show up in the query_params.
-            pb_request = request_type.pb(request)
-            transcode_result = {
-                "uri": "v1/sample_method",
-                "method": "get",
-                "query_params": pb_request,
-            }
-            transcode.return_value = transcode_result
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = mock.Mock()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.read_rows(request)

-    response_value = Response()
-    response_value.status_code = 200
-    pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value)
-    json_return_value = json_format.MessageToJson(pb_return_value)
-    json_return_value = "[{}]".format(json_return_value)

+@pytest.mark.parametrize(
+    "request_type",
+    [
+        bigtable.ReadRowsRequest,
+        dict,
+    ],
+)
+def test_read_rows_rest_call_success(request_type):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )

-    response_value._content = json_return_value.encode("UTF-8")
-    req.return_value = response_value
+    # send a request that will satisfy transcoding
+    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
+    request = request_type(**request_init)

-    with mock.patch.object(response_value, "iter_content") as iter_content:
-        iter_content.return_value = iter(json_return_value)
-        response = client.sample_row_keys(request)
+    # Mock the http request call within the method and fake a response.
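+    # REST tests patch the transport's HTTP session rather than a gRPC stub.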
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse( + last_scanned_row_key=b"last_scanned_row_key_blob", + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.read_rows(request) -def test_sample_row_keys_rest_unset_required_fields(): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + assert isinstance(response, Iterable) + response = next(response) - unset_fields = transport.sample_row_keys._get_unset_required_fields({}) - assert set(unset_fields) == (set(("appProfileId",)) & set(("tableName",))) + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadRowsResponse) + assert response.last_scanned_row_key == b"last_scanned_row_key_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_sample_row_keys_rest_interceptors(null_interceptor): +def test_read_rows_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_sample_row_keys" + transports.BigtableRestInterceptor, "post_read_rows" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_sample_row_keys" + transports.BigtableRestInterceptor, "post_read_rows_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableRestInterceptor, "pre_read_rows" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) + post_with_metadata.assert_not_called() + pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -3423,23 +9234,22 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.SampleRowKeysResponse.to_json( - bigtable.SampleRowKeysResponse() - ) - req.return_value._content = "[{}]".format(req.return_value._content) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable.ReadRowsResponse.to_json(bigtable.ReadRowsResponse()) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) - request = bigtable.SampleRowKeysRequest() + request = bigtable.ReadRowsRequest() metadata = [ ("key", 
"val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.SampleRowKeysResponse() + post.return_value = bigtable.ReadRowsResponse() + post_with_metadata.return_value = bigtable.ReadRowsResponse(), metadata - client.sample_row_keys( + client.read_rows( request, metadata=[ ("key", "val"), @@ -3449,16 +9259,13 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_sample_row_keys_rest_bad_request( - transport: str = "rest", request_type=bigtable.SampleRowKeysRequest -): +def test_sample_row_keys_rest_bad_request(request_type=bigtable.SampleRowKeysRequest): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) @@ -3468,93 +9275,26 @@ def test_sample_row_keys_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.sample_row_keys(request) -def test_sample_row_keys_rest_flattened(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", - app_profile_id="app_profile_id_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - client.sample_row_keys(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" - % client.transport._host, - args[1], - ) - - -def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.sample_row_keys( - bigtable.SampleRowKeysRequest(), - table_name="table_name_value", - app_profile_id="app_profile_id_value", - ) - - -def test_sample_row_keys_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - @pytest.mark.parametrize( "request_type", [ - bigtable.MutateRowRequest, + bigtable.SampleRowKeysRequest, dict, ], ) -def test_mutate_row_rest(request_type): +def test_sample_row_keys_rest_call_success(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -3564,139 +9304,56 @@ def test_mutate_row_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowResponse() - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.mutate_row(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.MutateRowResponse) - - -def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request_init["row_key"] = b"" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, + return_value = bigtable.SampleRowKeysResponse( + row_key=b"row_key_blob", + offset_bytes=1293, ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).mutate_row._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = "table_name_value" - jsonified_request["rowKey"] = b"row_key_blob" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).mutate_row._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" - assert "rowKey" in jsonified_request - assert jsonified_request["rowKey"] == b"row_key_blob" - - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.mutate_row(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 -def test_mutate_row_rest_unset_required_fields(): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.sample_row_keys(request) - unset_fields = transport.mutate_row._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "tableName", - "rowKey", - "mutations", - ) - ) - ) + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. 
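+    # (next() above pulled the single message out of the streamed REST
+    # response, which arrives as a JSON array.)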
+ assert isinstance(response, bigtable.SampleRowKeysResponse) + assert response.row_key == b"row_key_blob" + assert response.offset_bytes == 1293 @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_mutate_row_rest_interceptors(null_interceptor): +def test_sample_row_keys_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_mutate_row" + transports.BigtableRestInterceptor, "post_sample_row_keys" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_mutate_row" + transports.BigtableRestInterceptor, "post_sample_row_keys_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableRestInterceptor, "pre_sample_row_keys" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest()) + post_with_metadata.assert_not_called() + pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -3704,22 +9361,24 @@ def test_mutate_row_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.MutateRowResponse.to_json( - bigtable.MutateRowResponse() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable.SampleRowKeysResponse.to_json( + bigtable.SampleRowKeysResponse() ) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) - request = bigtable.MutateRowRequest() + request = bigtable.SampleRowKeysRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.MutateRowResponse() + post.return_value = bigtable.SampleRowKeysResponse() + post_with_metadata.return_value = bigtable.SampleRowKeysResponse(), metadata - client.mutate_row( + client.sample_row_keys( request, metadata=[ ("key", "val"), @@ -3729,16 +9388,13 @@ def test_mutate_row_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_mutate_row_rest_bad_request( - transport: str = "rest", request_type=bigtable.MutateRowRequest -): +def test_mutate_row_rest_bad_request(request_type=bigtable.MutateRowRequest): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) @@ -3748,89 +9404,132 @@ def test_mutate_row_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() 
req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.mutate_row(request) -def test_mutate_row_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable.MutateRowRequest, + dict, + ], +) +def test_mutate_row_rest_call_success(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. return_value = bigtable.MutateRowResponse() - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", - row_key=b"row_key_blob", - mutations=[ - data.Mutation( - set_cell=data.Mutation.SetCell(family_name="family_name_value") - ) - ], - app_profile_id="app_profile_id_value", - ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.mutate_row(**mock_args) + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.mutate_row(request) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. 
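+    # Unlike the streaming methods, mutate_row returns a single message
+    # decoded from the response body.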
+    assert isinstance(response, bigtable.MutateRowResponse)


-def test_mutate_row_rest_flattened_error(transport: str = "rest"):
-    client = BigtableClient(
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_mutate_row_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
         credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
     )
+    client = BigtableClient(transport=transport)
+
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_mutate_row"
+    ) as post, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_mutate_row_with_metadata"
+    ) as post_with_metadata, mock.patch.object(
+        transports.BigtableRestInterceptor, "pre_mutate_row"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        post_with_metadata.assert_not_called()
+        pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = mock.Mock()
+        req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = bigtable.MutateRowResponse.to_json(bigtable.MutateRowResponse())
+        req.return_value.content = return_value
+
+        request = bigtable.MutateRowRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.MutateRowResponse()
+        post_with_metadata.return_value = bigtable.MutateRowResponse(), metadata

-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
         client.mutate_row(
-            bigtable.MutateRowRequest(),
-            table_name="table_name_value",
-            row_key=b"row_key_blob",
-            mutations=[
-                data.Mutation(
-                    set_cell=data.Mutation.SetCell(family_name="family_name_value")
-                )
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
             ],
-            app_profile_id="app_profile_id_value",
         )

+        pre.assert_called_once()
+        post.assert_called_once()
+        post_with_metadata.assert_called_once()
+

-def test_mutate_row_rest_error():
+def test_mutate_rows_rest_bad_request(request_type=bigtable.MutateRowsRequest):
     client = BigtableClient(
         credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )

+    # send a request that will satisfy transcoding
+    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = mock.Mock()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.mutate_rows(request)
@@ -3840,10 +9539,9 @@ def test_mutate_row_rest_error():
         dict,
     ],
 )
-def test_mutate_rows_rest(request_type):
+def test_mutate_rows_rest_call_success(request_type):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )

     # send a request that will satisfy transcoding
@@ -3856,120 +9554,23 @@ def test_mutate_rows_rest(request_type):
         return_value = bigtable.MutateRowsResponse()

         # Wrap the value into a proper Response obj
-        response_value = Response()
+        response_value = mock.Mock()
         response_value.status_code = 200
-        pb_return_value = bigtable.MutateRowsResponse.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-
-        json_return_value = "[{}]".format(json_return_value)
-
-        response_value._content = json_return_value.encode("UTF-8")
-        req.return_value = response_value
-        with mock.patch.object(response_value, "iter_content") as iter_content:
-            iter_content.return_value = iter(json_return_value)
-            response = client.mutate_rows(request)
-
-        assert isinstance(response, Iterable)
-        response = next(response)
-
-        # Establish that the response is the type that we expect.
-        assert isinstance(response, bigtable.MutateRowsResponse)
-
-
-def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsRequest):
-    transport_class = transports.BigtableRestTransport
-
-    request_init = {}
-    request_init["table_name"] = ""
-    request = request_type(**request_init)
-    pb_request = request_type.pb(request)
-    jsonified_request = json.loads(
-        json_format.MessageToJson(
-            pb_request,
-            including_default_value_fields=False,
-            use_integers_for_enums=False,
-        )
-    )
-
-    # verify fields with default values are dropped
-
-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).mutate_rows._get_unset_required_fields(jsonified_request)
-    jsonified_request.update(unset_fields)
-
-    # verify required fields with default values are now present
-
-    jsonified_request["tableName"] = "table_name_value"
-
-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).mutate_rows._get_unset_required_fields(jsonified_request)
-    jsonified_request.update(unset_fields)
-
-    # verify required fields with non-default values are left alone
-    assert "tableName" in jsonified_request
-    assert jsonified_request["tableName"] == "table_name_value"
-
-    client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-    request = request_type(**request_init)
-
-    # Designate an appropriate value for the returned response.
-    return_value = bigtable.MutateRowsResponse()
-    # Mock the http request call within the method and fake a response.
-    with mock.patch.object(Session, "request") as req:
-        # We need to mock transcode() because providing default values
-        # for required fields will fail the real version if the http_options
-        # expect actual values for those fields.
-        with mock.patch.object(path_template, "transcode") as transcode:
-            # A uri without fields and an empty body will force all the
-            # request fields to show up in the query_params.
-            pb_request = request_type.pb(request)
-            transcode_result = {
-                "uri": "v1/sample_method",
-                "method": "post",
-                "query_params": pb_request,
-            }
-            transcode_result["body"] = pb_request
-            transcode.return_value = transcode_result
-
-            response_value = Response()
-            response_value.status_code = 200
-
-            pb_return_value = bigtable.MutateRowsResponse.pb(return_value)
-            json_return_value = json_format.MessageToJson(pb_return_value)
-            json_return_value = "[{}]".format(json_return_value)
-
-            response_value._content = json_return_value.encode("UTF-8")
-            req.return_value = response_value
-
-            with mock.patch.object(response_value, "iter_content") as iter_content:
-                iter_content.return_value = iter(json_return_value)
-                response = client.mutate_rows(request)
-
-            expected_params = [("$alt", "json;enum-encoding=int")]
-            actual_params = req.call_args.kwargs["params"]
-            assert expected_params == actual_params
+        # Convert return value to protobuf type
+        return_value = bigtable.MutateRowsResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        json_return_value = "[{}]".format(json_return_value)
+        response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        response = client.mutate_rows(request)

-def test_mutate_rows_rest_unset_required_fields():
-    transport = transports.BigtableRestTransport(
-        credentials=ga_credentials.AnonymousCredentials
-    )
+    assert isinstance(response, Iterable)
+    response = next(response)

-    unset_fields = transport.mutate_rows._get_unset_required_fields({})
-    assert set(unset_fields) == (
-        set(())
-        & set(
-            (
-                "tableName",
-                "entries",
-            )
-        )
-    )
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.MutateRowsResponse)
@@ -3979,6 +9580,7 @@ def test_mutate_rows_rest_interceptors(null_interceptor):
         interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
     )
     client = BigtableClient(transport=transport)
+
     with mock.patch.object(
         type(client.transport._session), "request"
     ) as req, mock.patch.object(
@@ -3986,10 +9588,13 @@ def test_mutate_rows_rest_interceptors(null_interceptor):
     ) as transcode, mock.patch.object(
         transports.BigtableRestInterceptor, "post_mutate_rows"
     ) as post, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_mutate_rows_with_metadata"
+    ) as post_with_metadata, mock.patch.object(
         transports.BigtableRestInterceptor, "pre_mutate_rows"
     ) as pre:
         pre.assert_not_called()
         post.assert_not_called()
+        post_with_metadata.assert_not_called()
         pb_message = bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest())
         transcode.return_value = {
             "method": "post",
@@ -3998,13 +9603,13 @@
             "query_params": pb_message,
         }

-        req.return_value = Response()
+        req.return_value = mock.Mock()
         req.return_value.status_code = 200
-        req.return_value.request = PreparedRequest()
-        req.return_value._content = bigtable.MutateRowsResponse.to_json(
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = bigtable.MutateRowsResponse.to_json(
             bigtable.MutateRowsResponse()
         )
-        req.return_value._content = "[{}]".format(req.return_value._content)
+        req.return_value.iter_content = mock.Mock(return_value=iter(return_value))

         request = bigtable.MutateRowsRequest()
         metadata = [
@@ -4013,6 +9618,7 @@
         ]
         pre.return_value = request, metadata
         post.return_value = bigtable.MutateRowsResponse()
+        post_with_metadata.return_value = bigtable.MutateRowsResponse(), metadata

         client.mutate_rows(
             request,
@@ -4024,16 +9630,15 @@
         pre.assert_called_once()
         post.assert_called_once()
+        post_with_metadata.assert_called_once()


-def test_mutate_rows_rest_bad_request(
-    transport: str = "rest", request_type=bigtable.MutateRowsRequest
+def test_check_and_mutate_row_rest_bad_request(
+    request_type=bigtable.CheckAndMutateRowRequest,
 ):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )
-    # send a request that will satisfy transcoding
     request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
     request = request_type(**request_init)

@@ -4043,82 +9648,14 @@ def test_mutate_rows_rest_bad_request(
         core_exceptions.BadRequest
     ):
         # Wrap the value into a proper Response obj
-        response_value = Response()
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
         response_value.status_code = 400
-        response_value.request = Request()
-        req.return_value = response_value
-        client.mutate_rows(request)
-
-
-def test_mutate_rows_rest_flattened():
-    client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the http request call within the method and fake a response.
-    with mock.patch.object(type(client.transport._session), "request") as req:
-        # Designate an appropriate value for the returned response.
-        return_value = bigtable.MutateRowsResponse()
-
-        # get arguments that satisfy an http rule for this method
-        sample_request = {
-            "table_name": "projects/sample1/instances/sample2/tables/sample3"
-        }
-
-        # get truthy value for each flattened field
-        mock_args = dict(
-            table_name="table_name_value",
-            entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")],
-            app_profile_id="app_profile_id_value",
-        )
-        mock_args.update(sample_request)
-
-        # Wrap the value into a proper Response obj
-        response_value = Response()
-        response_value.status_code = 200
-        pb_return_value = bigtable.MutateRowsResponse.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-        json_return_value = "[{}]".format(json_return_value)
-        response_value._content = json_return_value.encode("UTF-8")
+        response_value.request = mock.Mock()
         req.return_value = response_value
-
-        with mock.patch.object(response_value, "iter_content") as iter_content:
-            iter_content.return_value = iter(json_return_value)
-            client.mutate_rows(**mock_args)
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(req.mock_calls) == 1
-        _, args, _ = req.mock_calls[0]
-        assert path_template.validate(
-            "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows"
-            % client.transport._host,
-            args[1],
-        )
-
-
-def test_mutate_rows_rest_flattened_error(transport: str = "rest"):
-    client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.mutate_rows(
-            bigtable.MutateRowsRequest(),
-            table_name="table_name_value",
-            entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")],
-            app_profile_id="app_profile_id_value",
-        )
-
-
-def test_mutate_rows_rest_error():
-    client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
-    )
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.check_and_mutate_row(request)
@@ -4128,10 +9665,9 @@ def test_mutate_rows_rest_error():
         dict,
     ],
 )
-def test_check_and_mutate_row_rest(request_type):
+def test_check_and_mutate_row_rest_call_success(request_type):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )

     # send a request that will satisfy transcoding
@@ -4146,13 +9682,15 @@
         )

         # Wrap the value into a proper Response obj
-        response_value = Response()
+        response_value = mock.Mock()
         response_value.status_code = 200
-        pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-        response_value._content = json_return_value.encode("UTF-8")
+        # Convert return value to protobuf type
+        return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.check_and_mutate_row(request)

     # Establish that the response is the type that we expect.
@@ -4160,126 +9698,151 @@ def test_check_and_mutate_row_rest(request_type):
     assert response.predicate_matched is True


-def test_check_and_mutate_row_rest_required_fields(
-    request_type=bigtable.CheckAndMutateRowRequest,
-):
-    transport_class = transports.BigtableRestTransport
-
-    request_init = {}
-    request_init["table_name"] = ""
-    request_init["row_key"] = b""
-    request = request_type(**request_init)
-    pb_request = request_type.pb(request)
-    jsonified_request = json.loads(
-        json_format.MessageToJson(
-            pb_request,
-            including_default_value_fields=False,
-            use_integers_for_enums=False,
-        )
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_check_and_mutate_row_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
     )
+    client = BigtableClient(transport=transport)

-    # verify fields with default values are dropped
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_check_and_mutate_row"
+    ) as post, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_check_and_mutate_row_with_metadata"
+    ) as post_with_metadata, mock.patch.object(
+        transports.BigtableRestInterceptor, "pre_check_and_mutate_row"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        post_with_metadata.assert_not_called()
+        pb_message = bigtable.CheckAndMutateRowRequest.pb(
+            bigtable.CheckAndMutateRowRequest()
+        )
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }

-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).check_and_mutate_row._get_unset_required_fields(jsonified_request)
-    jsonified_request.update(unset_fields)
+        req.return_value = mock.Mock()
+        req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = bigtable.CheckAndMutateRowResponse.to_json(
+            bigtable.CheckAndMutateRowResponse()
+        )
+        req.return_value.content = return_value

-    # verify required fields with default values are now present
+        request = bigtable.CheckAndMutateRowRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.CheckAndMutateRowResponse()
+        post_with_metadata.return_value = bigtable.CheckAndMutateRowResponse(), metadata

-    jsonified_request["tableName"] = "table_name_value"
-    jsonified_request["rowKey"] = b"row_key_blob"
+        client.check_and_mutate_row(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
+        )

-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).check_and_mutate_row._get_unset_required_fields(jsonified_request)
-    jsonified_request.update(unset_fields)
+        pre.assert_called_once()
+        post.assert_called_once()
+        post_with_metadata.assert_called_once()

-    # verify required fields with non-default values are left alone
-    assert "tableName" in jsonified_request
-    assert jsonified_request["tableName"] == "table_name_value"
-    assert "rowKey" in jsonified_request
-    assert jsonified_request["rowKey"] == b"row_key_blob"

+def test_ping_and_warm_rest_bad_request(request_type=bigtable.PingAndWarmRequest):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )
+    # send a request that will satisfy transcoding
+    request_init = {"name": "projects/sample1/instances/sample2"}
     request = request_type(**request_init)

-    # Designate an appropriate value for the returned response.
-    return_value = bigtable.CheckAndMutateRowResponse()
-    # Mock the http request call within the method and fake a response.
-    with mock.patch.object(Session, "request") as req:
-        # We need to mock transcode() because providing default values
-        # for required fields will fail the real version if the http_options
-        # expect actual values for those fields.
-        with mock.patch.object(path_template, "transcode") as transcode:
-            # A uri without fields and an empty body will force all the
-            # request fields to show up in the query_params.
-            pb_request = request_type.pb(request)
-            transcode_result = {
-                "uri": "v1/sample_method",
-                "method": "post",
-                "query_params": pb_request,
-            }
-            transcode_result["body"] = pb_request
-            transcode.return_value = transcode_result
-
-            response_value = Response()
-            response_value.status_code = 200
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = mock.Mock()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.ping_and_warm(request)

-            pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
-            json_return_value = json_format.MessageToJson(pb_return_value)
-            response_value._content = json_return_value.encode("UTF-8")
-            req.return_value = response_value

-            response = client.check_and_mutate_row(request)
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        bigtable.PingAndWarmRequest,
+        dict,
+    ],
+)
+def test_ping_and_warm_rest_call_success(request_type):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )

-            expected_params = [("$alt", "json;enum-encoding=int")]
-            actual_params = req.call_args.kwargs["params"]
-            assert expected_params == actual_params
+    # send a request that will satisfy transcoding
+    request_init = {"name": "projects/sample1/instances/sample2"}
+    request = request_type(**request_init)

+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), "request") as req:
+        # Designate an appropriate value for the returned response.
+        return_value = bigtable.PingAndWarmResponse()

+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        response_value.status_code = 200

-def test_check_and_mutate_row_rest_unset_required_fields():
-    transport = transports.BigtableRestTransport(
-        credentials=ga_credentials.AnonymousCredentials
-    )
+        # Convert return value to protobuf type
+        return_value = bigtable.PingAndWarmResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value.content = json_return_value.encode("UTF-8")
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        response = client.ping_and_warm(request)

-    unset_fields = transport.check_and_mutate_row._get_unset_required_fields({})
-    assert set(unset_fields) == (
-        set(())
-        & set(
-            (
-                "tableName",
-                "rowKey",
-            )
-        )
-    )
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.PingAndWarmResponse)


 @pytest.mark.parametrize("null_interceptor", [True, False])
-def test_check_and_mutate_row_rest_interceptors(null_interceptor):
+def test_ping_and_warm_rest_interceptors(null_interceptor):
     transport = transports.BigtableRestTransport(
         credentials=ga_credentials.AnonymousCredentials(),
         interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
     )
     client = BigtableClient(transport=transport)
+
     with mock.patch.object(
         type(client.transport._session), "request"
     ) as req, mock.patch.object(
         path_template, "transcode"
     ) as transcode, mock.patch.object(
-        transports.BigtableRestInterceptor, "post_check_and_mutate_row"
+        transports.BigtableRestInterceptor, "post_ping_and_warm"
     ) as post, mock.patch.object(
-        transports.BigtableRestInterceptor, "pre_check_and_mutate_row"
+        transports.BigtableRestInterceptor, "post_ping_and_warm_with_metadata"
+    ) as post_with_metadata, mock.patch.object(
+        transports.BigtableRestInterceptor, "pre_ping_and_warm"
     ) as pre:
         pre.assert_not_called()
         post.assert_not_called()
-        pb_message = bigtable.CheckAndMutateRowRequest.pb(
-            bigtable.CheckAndMutateRowRequest()
-        )
+        post_with_metadata.assert_not_called()
+        pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest())
         transcode.return_value = {
             "method": "post",
             "uri": "my_uri",
@@ -4287,22 +9850,24 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor):
             "query_params": pb_message,
         }

-        req.return_value = Response()
+        req.return_value = mock.Mock()
         req.return_value.status_code = 200
-        req.return_value.request = PreparedRequest()
-        req.return_value._content = bigtable.CheckAndMutateRowResponse.to_json(
-            bigtable.CheckAndMutateRowResponse()
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = bigtable.PingAndWarmResponse.to_json(
+            bigtable.PingAndWarmResponse()
         )
+        req.return_value.content = return_value

-        request = bigtable.CheckAndMutateRowRequest()
+        request = bigtable.PingAndWarmRequest()
         metadata = [
             ("key", "val"),
             ("cephalopod", "squid"),
         ]
         pre.return_value = request, metadata
-        post.return_value = bigtable.CheckAndMutateRowResponse()
+        post.return_value = bigtable.PingAndWarmResponse()
+        post_with_metadata.return_value = bigtable.PingAndWarmResponse(), metadata

-        client.check_and_mutate_row(
+        client.ping_and_warm(
             request,
             metadata=[
                 ("key", "val"),
@@ -4312,16 +9877,15 @@
         pre.assert_called_once()
         post.assert_called_once()
+        post_with_metadata.assert_called_once()


-def test_check_and_mutate_row_rest_bad_request(
-    transport: str = "rest", request_type=bigtable.CheckAndMutateRowRequest
+def test_read_modify_write_row_rest_bad_request(
+    request_type=bigtable.ReadModifyWriteRowRequest,
 ):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )
-    # send a request that will satisfy transcoding
     request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
     request = request_type(**request_init)

@@ -4331,263 +9895,214 @@ def test_check_and_mutate_row_rest_bad_request(
         core_exceptions.BadRequest
     ):
         # Wrap the value into a proper Response obj
-        response_value = Response()
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
         response_value.status_code = 400
-        response_value.request = Request()
-        req.return_value = response_value
-        client.check_and_mutate_row(request)
-
-
-def test_check_and_mutate_row_rest_flattened():
-    client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
-    )
-
-    # Mock the http request call within the method and fake a response.
-    with mock.patch.object(type(client.transport._session), "request") as req:
-        # Designate an appropriate value for the returned response.
-        return_value = bigtable.CheckAndMutateRowResponse()
-
-        # get arguments that satisfy an http rule for this method
-        sample_request = {
-            "table_name": "projects/sample1/instances/sample2/tables/sample3"
-        }
-
-        # get truthy value for each flattened field
-        mock_args = dict(
-            table_name="table_name_value",
-            row_key=b"row_key_blob",
-            predicate_filter=data.RowFilter(
-                chain=data.RowFilter.Chain(
-                    filters=[
-                        data.RowFilter(
-                            chain=data.RowFilter.Chain(
-                                filters=[data.RowFilter(chain=None)]
-                            )
-                        )
-                    ]
-                )
-            ),
-            true_mutations=[
-                data.Mutation(
-                    set_cell=data.Mutation.SetCell(family_name="family_name_value")
-                )
-            ],
-            false_mutations=[
-                data.Mutation(
-                    set_cell=data.Mutation.SetCell(family_name="family_name_value")
-                )
-            ],
-            app_profile_id="app_profile_id_value",
-        )
-        mock_args.update(sample_request)
-
-        # Wrap the value into a proper Response obj
-        response_value = Response()
-        response_value.status_code = 200
-        pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-        response_value._content = json_return_value.encode("UTF-8")
+        response_value.request = mock.Mock()
         req.return_value = response_value
-
-        client.check_and_mutate_row(**mock_args)
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(req.mock_calls) == 1
-        _, args, _ = req.mock_calls[0]
-        assert path_template.validate(
-            "%s/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow"
-            % client.transport._host,
-            args[1],
-        )
-
-
-def test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"):
-    client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.check_and_mutate_row(
-            bigtable.CheckAndMutateRowRequest(),
-            table_name="table_name_value",
-            row_key=b"row_key_blob",
-            predicate_filter=data.RowFilter(
-                chain=data.RowFilter.Chain(
-                    filters=[
-                        data.RowFilter(
-                            chain=data.RowFilter.Chain(
-                                filters=[data.RowFilter(chain=None)]
-                            )
-                        )
-                    ]
-                )
-            ),
-            true_mutations=[
-                data.Mutation(
-                    set_cell=data.Mutation.SetCell(family_name="family_name_value")
-                )
-            ],
-            false_mutations=[
-                data.Mutation(
-                    set_cell=data.Mutation.SetCell(family_name="family_name_value")
-                )
-            ],
-            app_profile_id="app_profile_id_value",
-        )
-
-
-def test_check_and_mutate_row_rest_error():
-    client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
-    )
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.read_modify_write_row(request)


 @pytest.mark.parametrize(
     "request_type",
     [
-        bigtable.PingAndWarmRequest,
+        bigtable.ReadModifyWriteRowRequest,
         dict,
     ],
 )
-def test_ping_and_warm_rest(request_type):
+def test_read_modify_write_row_rest_call_success(request_type):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )

     # send a request that will satisfy transcoding
-    request_init = {"name": "projects/sample1/instances/sample2"}
+    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
     request = request_type(**request_init)

     # Mock the http request call within the method and fake a response.
     with mock.patch.object(type(client.transport._session), "request") as req:
         # Designate an appropriate value for the returned response.
-        return_value = bigtable.PingAndWarmResponse()
+        return_value = bigtable.ReadModifyWriteRowResponse()

         # Wrap the value into a proper Response obj
-        response_value = Response()
+        response_value = mock.Mock()
         response_value.status_code = 200
-        pb_return_value = bigtable.PingAndWarmResponse.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-        response_value._content = json_return_value.encode("UTF-8")
+        # Convert return value to protobuf type
+        return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
-        response = client.ping_and_warm(request)
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        response = client.read_modify_write_row(request)

     # Establish that the response is the type that we expect.
-    assert isinstance(response, bigtable.PingAndWarmResponse)
-
+    assert isinstance(response, bigtable.ReadModifyWriteRowResponse)

-def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmRequest):
-    transport_class = transports.BigtableRestTransport

-    request_init = {}
-    request_init["name"] = ""
-    request = request_type(**request_init)
-    pb_request = request_type.pb(request)
-    jsonified_request = json.loads(
-        json_format.MessageToJson(
-            pb_request,
-            including_default_value_fields=False,
-            use_integers_for_enums=False,
-        )
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_read_modify_write_row_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
     )
+    client = BigtableClient(transport=transport)

-    # verify fields with default values are dropped
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_read_modify_write_row"
+    ) as post, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_read_modify_write_row_with_metadata"
+    ) as post_with_metadata, mock.patch.object(
+        transports.BigtableRestInterceptor, "pre_read_modify_write_row"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        post_with_metadata.assert_not_called()
+        pb_message = bigtable.ReadModifyWriteRowRequest.pb(
+            bigtable.ReadModifyWriteRowRequest()
+        )
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }

-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).ping_and_warm._get_unset_required_fields(jsonified_request)
-    jsonified_request.update(unset_fields)
+        req.return_value = mock.Mock()
+        req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = bigtable.ReadModifyWriteRowResponse.to_json(
+            bigtable.ReadModifyWriteRowResponse()
+        )
+        req.return_value.content = return_value

-    # verify required fields with default values are now present
+        request = bigtable.ReadModifyWriteRowRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.ReadModifyWriteRowResponse()
+        post_with_metadata.return_value = (
+            bigtable.ReadModifyWriteRowResponse(),
+            metadata,
+        )

-    jsonified_request["name"] = "name_value"
+        client.read_modify_write_row(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
+        )

-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).ping_and_warm._get_unset_required_fields(jsonified_request)
-    jsonified_request.update(unset_fields)
+        pre.assert_called_once()
+        post.assert_called_once()
+        post_with_metadata.assert_called_once()

-    # verify required fields with non-default values are left alone
-    assert "name" in jsonified_request
-    assert jsonified_request["name"] == "name_value"

+def test_generate_initial_change_stream_partitions_rest_bad_request(
+    request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest,
+):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )
+    # send a request that will satisfy transcoding
+    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
     request = request_type(**request_init)

-    # Designate an appropriate value for the returned response.
-    return_value = bigtable.PingAndWarmResponse()
-    # Mock the http request call within the method and fake a response.
-    with mock.patch.object(Session, "request") as req:
-        # We need to mock transcode() because providing default values
-        # for required fields will fail the real version if the http_options
-        # expect actual values for those fields.
-        with mock.patch.object(path_template, "transcode") as transcode:
-            # A uri without fields and an empty body will force all the
-            # request fields to show up in the query_params.
-            pb_request = request_type.pb(request)
-            transcode_result = {
-                "uri": "v1/sample_method",
-                "method": "post",
-                "query_params": pb_request,
-            }
-            transcode_result["body"] = pb_request
-            transcode.return_value = transcode_result
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = mock.Mock()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.generate_initial_change_stream_partitions(request)

-            response_value = Response()
-            response_value.status_code = 200
-            pb_return_value = bigtable.PingAndWarmResponse.pb(return_value)
-            json_return_value = json_format.MessageToJson(pb_return_value)

+@pytest.mark.parametrize(
+    "request_type",
+    [
+        bigtable.GenerateInitialChangeStreamPartitionsRequest,
+        dict,
+    ],
+)
+def test_generate_initial_change_stream_partitions_rest_call_success(request_type):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )

-            response_value._content = json_return_value.encode("UTF-8")
-            req.return_value = response_value
+    # send a request that will satisfy transcoding
+    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
+    request = request_type(**request_init)

-            response = client.ping_and_warm(request)
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), "request") as req:
+        # Designate an appropriate value for the returned response.
+        return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse()

-            expected_params = [("$alt", "json;enum-encoding=int")]
-            actual_params = req.call_args.kwargs["params"]
-            assert expected_params == actual_params
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        response_value.status_code = 200

+        # Convert return value to protobuf type
+        return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(
+            return_value
+        )
+        json_return_value = json_format.MessageToJson(return_value)
+        json_return_value = "[{}]".format(json_return_value)
+        response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        response = client.generate_initial_change_stream_partitions(request)

-def test_ping_and_warm_rest_unset_required_fields():
-    transport = transports.BigtableRestTransport(
-        credentials=ga_credentials.AnonymousCredentials
-    )
+    assert isinstance(response, Iterable)
+    response = next(response)

-    unset_fields = transport.ping_and_warm._get_unset_required_fields({})
-    assert set(unset_fields) == (set(()) & set(("name",)))
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse)


 @pytest.mark.parametrize("null_interceptor", [True, False])
-def test_ping_and_warm_rest_interceptors(null_interceptor):
+def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor):
     transport = transports.BigtableRestTransport(
         credentials=ga_credentials.AnonymousCredentials(),
         interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
     )
     client = BigtableClient(transport=transport)
+
     with mock.patch.object(
         type(client.transport._session), "request"
     ) as req, mock.patch.object(
         path_template, "transcode"
     ) as transcode, mock.patch.object(
-        transports.BigtableRestInterceptor, "post_ping_and_warm"
+        transports.BigtableRestInterceptor,
+        "post_generate_initial_change_stream_partitions",
     ) as post, mock.patch.object(
-        transports.BigtableRestInterceptor, "pre_ping_and_warm"
+        transports.BigtableRestInterceptor,
+        "post_generate_initial_change_stream_partitions_with_metadata",
+    ) as post_with_metadata, mock.patch.object(
+        transports.BigtableRestInterceptor,
+        "pre_generate_initial_change_stream_partitions",
     ) as pre:
         pre.assert_not_called()
         post.assert_not_called()
-        pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest())
+        post_with_metadata.assert_not_called()
+        pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb(
+            bigtable.GenerateInitialChangeStreamPartitionsRequest()
+        )
         transcode.return_value = {
             "method": "post",
             "uri": "my_uri",
@@ -4595,22 +10110,27 @@ def test_ping_and_warm_rest_interceptors(null_interceptor):
             "query_params": pb_message,
         }

-        req.return_value = Response()
+        req.return_value = mock.Mock()
         req.return_value.status_code = 200
-        req.return_value.request = PreparedRequest()
-        req.return_value._content = bigtable.PingAndWarmResponse.to_json(
-            bigtable.PingAndWarmResponse()
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json(
+            bigtable.GenerateInitialChangeStreamPartitionsResponse()
         )
+        req.return_value.iter_content = mock.Mock(return_value=iter(return_value))

-        request = bigtable.PingAndWarmRequest()
+        request = bigtable.GenerateInitialChangeStreamPartitionsRequest()
         metadata = [
             ("key", "val"),
             ("cephalopod", "squid"),
         ]
         pre.return_value = request, metadata
-        post.return_value = bigtable.PingAndWarmResponse()
+        post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse()
+        post_with_metadata.return_value = (
+            bigtable.GenerateInitialChangeStreamPartitionsResponse(),
+            metadata,
+        )

-        client.ping_and_warm(
+        client.generate_initial_change_stream_partitions(
             request,
             metadata=[
                 ("key", "val"),
@@ -4620,18 +10140,17 @@
         pre.assert_called_once()
         post.assert_called_once()
+        post_with_metadata.assert_called_once()


-def test_ping_and_warm_rest_bad_request(
-    transport: str = "rest", request_type=bigtable.PingAndWarmRequest
+def test_read_change_stream_rest_bad_request(
+    request_type=bigtable.ReadChangeStreamRequest,
 ):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )
-    # send a request that will satisfy transcoding
-    request_init = {"name": "projects/sample1/instances/sample2"}
+    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
     request = request_type(**request_init)

     # Mock the http request call within the method and fake a BadRequest error.
@@ -4639,232 +10158,329 @@
         core_exceptions.BadRequest
     ):
         # Wrap the value into a proper Response obj
-        response_value = Response()
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
         response_value.status_code = 400
-        response_value.request = Request()
+        response_value.request = mock.Mock()
         req.return_value = response_value
-        client.ping_and_warm(request)
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.read_change_stream(request)


-def test_ping_and_warm_rest_flattened():
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        bigtable.ReadChangeStreamRequest,
+        dict,
+    ],
+)
+def test_read_change_stream_rest_call_success(request_type):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )

+    # send a request that will satisfy transcoding
+    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
+    request = request_type(**request_init)
+
     # Mock the http request call within the method and fake a response.
     with mock.patch.object(type(client.transport._session), "request") as req:
         # Designate an appropriate value for the returned response.
-        return_value = bigtable.PingAndWarmResponse()
-
-        # get arguments that satisfy an http rule for this method
-        sample_request = {"name": "projects/sample1/instances/sample2"}
-
-        # get truthy value for each flattened field
-        mock_args = dict(
-            name="name_value",
-            app_profile_id="app_profile_id_value",
-        )
-        mock_args.update(sample_request)
+        return_value = bigtable.ReadChangeStreamResponse()

         # Wrap the value into a proper Response obj
-        response_value = Response()
+        response_value = mock.Mock()
         response_value.status_code = 200
-        pb_return_value = bigtable.PingAndWarmResponse.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-        response_value._content = json_return_value.encode("UTF-8")
+
+        # Convert return value to protobuf type
+        return_value = bigtable.ReadChangeStreamResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        json_return_value = "[{}]".format(json_return_value)
+        response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        response = client.read_change_stream(request)

-        client.ping_and_warm(**mock_args)
+    assert isinstance(response, Iterable)
+    response = next(response)

-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(req.mock_calls) == 1
-        _, args, _ = req.mock_calls[0]
-        assert path_template.validate(
-            "%s/v2/{name=projects/*/instances/*}:ping" % client.transport._host, args[1]
-        )
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.ReadChangeStreamResponse)


-def test_ping_and_warm_rest_flattened_error(transport: str = "rest"):
-    client = BigtableClient(
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_read_change_stream_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
         credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
     )
+    client = BigtableClient(transport=transport)

-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.ping_and_warm(
-            bigtable.PingAndWarmRequest(),
-            name="name_value",
-            app_profile_id="app_profile_id_value",
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_read_change_stream"
+    ) as post, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_read_change_stream_with_metadata"
+    ) as post_with_metadata, mock.patch.object(
+        transports.BigtableRestInterceptor, "pre_read_change_stream"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        post_with_metadata.assert_not_called()
+        pb_message = bigtable.ReadChangeStreamRequest.pb(
+            bigtable.ReadChangeStreamRequest()
+        )
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = mock.Mock()
+        req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = bigtable.ReadChangeStreamResponse.to_json(
+            bigtable.ReadChangeStreamResponse()
+        )
+        req.return_value.iter_content = mock.Mock(return_value=iter(return_value))
+
+        request = bigtable.ReadChangeStreamRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.ReadChangeStreamResponse()
+        post_with_metadata.return_value = bigtable.ReadChangeStreamResponse(), metadata
+
+        client.read_change_stream(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
         )

+        pre.assert_called_once()
+        post.assert_called_once()
+        post_with_metadata.assert_called_once()
+

-def test_ping_and_warm_rest_error():
+def test_prepare_query_rest_bad_request(request_type=bigtable.PrepareQueryRequest):
     client = BigtableClient(
         credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )

+    # send a request that will satisfy transcoding
+    request_init = {"instance_name": "projects/sample1/instances/sample2"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = mock.Mock()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.prepare_query(request)


 @pytest.mark.parametrize(
     "request_type",
     [
-        bigtable.ReadModifyWriteRowRequest,
+        bigtable.PrepareQueryRequest,
         dict,
     ],
 )
-def test_read_modify_write_row_rest(request_type):
+def test_prepare_query_rest_call_success(request_type):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )

     # send a request that will satisfy transcoding
-    request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"}
+    request_init = {"instance_name": "projects/sample1/instances/sample2"}
     request = request_type(**request_init)

     # Mock the http request call within the method and fake a response.
     with mock.patch.object(type(client.transport._session), "request") as req:
         # Designate an appropriate value for the returned response.
-        return_value = bigtable.ReadModifyWriteRowResponse()
+        return_value = bigtable.PrepareQueryResponse(
+            prepared_query=b"prepared_query_blob",
+        )

         # Wrap the value into a proper Response obj
-        response_value = Response()
+        response_value = mock.Mock()
         response_value.status_code = 200
-        pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
-        json_return_value = json_format.MessageToJson(pb_return_value)
-        response_value._content = json_return_value.encode("UTF-8")
+        # Convert return value to protobuf type
+        return_value = bigtable.PrepareQueryResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
-        response = client.read_modify_write_row(request)
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        response = client.prepare_query(request)

     # Establish that the response is the type that we expect.
-    assert isinstance(response, bigtable.ReadModifyWriteRowResponse)
-
+    assert isinstance(response, bigtable.PrepareQueryResponse)
+    assert response.prepared_query == b"prepared_query_blob"

-def test_read_modify_write_row_rest_required_fields(
-    request_type=bigtable.ReadModifyWriteRowRequest,
-):
-    transport_class = transports.BigtableRestTransport
-
-    request_init = {}
-    request_init["table_name"] = ""
-    request_init["row_key"] = b""
-    request = request_type(**request_init)
-    pb_request = request_type.pb(request)
-    jsonified_request = json.loads(
-        json_format.MessageToJson(
-            pb_request,
-            including_default_value_fields=False,
-            use_integers_for_enums=False,
-        )
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_prepare_query_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
     )
+    client = BigtableClient(transport=transport)

-    # verify fields with default values are dropped
-
-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).read_modify_write_row._get_unset_required_fields(jsonified_request)
-    jsonified_request.update(unset_fields)
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_prepare_query"
+    ) as post, mock.patch.object(
+        transports.BigtableRestInterceptor, "post_prepare_query_with_metadata"
+    ) as post_with_metadata, mock.patch.object(
+        transports.BigtableRestInterceptor, "pre_prepare_query"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        post_with_metadata.assert_not_called()
+        pb_message = bigtable.PrepareQueryRequest.pb(bigtable.PrepareQueryRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }

-    # verify required fields with default values are now present
+        req.return_value = mock.Mock()
+        req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        return_value = bigtable.PrepareQueryResponse.to_json(
+            bigtable.PrepareQueryResponse()
+        )
+        req.return_value.content = return_value

-    jsonified_request["tableName"] = "table_name_value"
-    jsonified_request["rowKey"] = b"row_key_blob"
+        request = bigtable.PrepareQueryRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.PrepareQueryResponse()
+        post_with_metadata.return_value = bigtable.PrepareQueryResponse(), metadata

-    unset_fields = transport_class(
-        credentials=ga_credentials.AnonymousCredentials()
-    ).read_modify_write_row._get_unset_required_fields(jsonified_request)
-    jsonified_request.update(unset_fields)
+        client.prepare_query(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
+        )

-    # verify required fields with non-default values are left alone
-    assert "tableName" in jsonified_request
-    assert jsonified_request["tableName"] == "table_name_value"
-    assert "rowKey" in jsonified_request
-    assert jsonified_request["rowKey"] == b"row_key_blob"
+        pre.assert_called_once()
+        post.assert_called_once()
+        post_with_metadata.assert_called_once()

+
+def test_execute_query_rest_bad_request(request_type=bigtable.ExecuteQueryRequest):
     client = BigtableClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport="rest",
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
     )
+    # send a request that will satisfy transcoding
+    request_init = {"instance_name": "projects/sample1/instances/sample2"}
     request = request_type(**request_init)

-    # Designate an appropriate value for the returned response.
-    return_value = bigtable.ReadModifyWriteRowResponse()
-    # Mock the http request call within the method and fake a response.
-    with mock.patch.object(Session, "request") as req:
-        # We need to mock transcode() because providing default values
-        # for required fields will fail the real version if the http_options
-        # expect actual values for those fields.
-        with mock.patch.object(path_template, "transcode") as transcode:
-            # A uri without fields and an empty body will force all the
-            # request fields to show up in the query_params.
-            pb_request = request_type.pb(request)
-            transcode_result = {
-                "uri": "v1/sample_method",
-                "method": "post",
-                "query_params": pb_request,
-            }
-            transcode_result["body"] = pb_request
-            transcode.return_value = transcode_result
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = mock.Mock()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.execute_query(request)

-            response_value = Response()
-            response_value.status_code = 200
-            pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
-            json_return_value = json_format.MessageToJson(pb_return_value)

+@pytest.mark.parametrize(
+    "request_type",
+    [
+        bigtable.ExecuteQueryRequest,
+        dict,
+    ],
+)
+def test_execute_query_rest_call_success(request_type):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )

-            response_value._content = json_return_value.encode("UTF-8")
-            req.return_value = response_value
+    # send a request that will satisfy transcoding
+    request_init = {"instance_name": "projects/sample1/instances/sample2"}
+    request = request_type(**request_init)

-            response = client.read_modify_write_row(request)
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ExecuteQueryResponse() - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ExecuteQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.execute_query(request) -def test_read_modify_write_row_rest_unset_required_fields(): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + assert isinstance(response, Iterable) + response = next(response) - unset_fields = transport.read_modify_write_row._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "tableName", - "rowKey", - "rules", - ) - ) - ) + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ExecuteQueryResponse) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_modify_write_row_rest_interceptors(null_interceptor): +def test_execute_query_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_modify_write_row" + transports.BigtableRestInterceptor, "post_execute_query" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_modify_write_row" + transports.BigtableRestInterceptor, "post_execute_query_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableRestInterceptor, "pre_execute_query" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.ReadModifyWriteRowRequest.pb( - bigtable.ReadModifyWriteRowRequest() - ) + post_with_metadata.assert_not_called() + pb_message = bigtable.ExecuteQueryRequest.pb(bigtable.ExecuteQueryRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -4872,22 +10488,24 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadModifyWriteRowResponse.to_json( - bigtable.ReadModifyWriteRowResponse() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable.ExecuteQueryResponse.to_json( + bigtable.ExecuteQueryResponse() ) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) - request = bigtable.ReadModifyWriteRowRequest() + request = bigtable.ExecuteQueryRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = 
request, metadata - post.return_value = bigtable.ReadModifyWriteRowResponse() + post.return_value = bigtable.ExecuteQueryResponse() + post_with_metadata.return_value = bigtable.ExecuteQueryResponse(), metadata - client.read_modify_write_row( + client.execute_query( request, metadata=[ ("key", "val"), @@ -4897,791 +10515,1051 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_read_modify_write_row_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadModifyWriteRowRequest -): +def test_initialize_client_w_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_rows_empty_call_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows(request=None) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_modify_write_row(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest() + assert args[0] == request_msg -def test_read_modify_write_row_rest_flattened(): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_sample_row_keys_empty_call_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadModifyWriteRowResponse() + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys(request=None) - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest() - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", - row_key=b"row_key_blob", - rules=[data.ReadModifyWriteRule(family_name="family_name_value")], - app_profile_id="app_profile_id_value", + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
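+# The transport stub should then receive a default-constructed request message.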
+def test_mutate_row_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_mutate_rows_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_check_and_mutate_row_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_ping_and_warm_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + client.ping_and_warm(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_modify_write_row_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_generate_initial_change_stream_partitions_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + client.generate_initial_change_stream_partitions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.GenerateInitialChangeStreamPartitionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_change_stream_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + client.read_change_stream(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadChangeStreamRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_prepare_query_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + client.prepare_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_execute_query_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + client.execute_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest() + + assert args[0] == request_msg + + +def test_read_rows_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying stub method was called. 
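+    # An explicit table_name should be echoed back in the x-goog-request-params
+    # routing header asserted below.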
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) - client.read_modify_write_row(**mock_args) + assert args[0] == request_msg - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" - % client.transport._host, - args[1], + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): +def test_read_rows_routing_parameters_request_2_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_modify_write_row( - bigtable.ReadModifyWriteRowRequest(), - table_name="table_name_value", - row_key=b"row_key_blob", - rules=[data.ReadModifyWriteRule(family_name="family_name_value")], - app_profile_id="app_profile_id_value", + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -def test_read_modify_write_row_rest_error(): +def test_read_rows_routing_parameters_request_3_rest(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable.GenerateInitialChangeStreamPartitionsRequest, - dict, - ], -) -def test_generate_initial_change_stream_partitions_rest(request_type): + # Establish that the underlying stub method was called. 
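+    # Routing resolves an authorized_view_name to its parent table, so the
+    # expected header carries table_name rather than the view path.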
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_rows_routing_parameters_request_4_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( - return_value + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} ) - json_return_value = json_format.MessageToJson(pb_return_value) - json_return_value = "[{}]".format(json_return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.generate_initial_change_stream_partitions(request) + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) - assert isinstance(response, Iterable) - response = next(response) - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) +def test_sample_row_keys_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. 
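+    # sample_row_keys routes on the same table_name parameter as read_rows.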
+ with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) -def test_generate_initial_change_stream_partitions_rest_required_fields( - request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, -): - transport_class = transports.BigtableRestTransport + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) - ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) - # verify fields with default values are dropped - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).generate_initial_change_stream_partitions._get_unset_required_fields( - jsonified_request +def test_sample_row_keys_routing_parameters_request_2_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - jsonified_request.update(unset_fields) - # verify required fields with default values are now present + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys(request={"app_profile_id": "sample1"}) - jsonified_request["tableName"] = "table_name_value" + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).generate_initial_change_stream_partitions._get_unset_required_fields( - jsonified_request - ) - jsonified_request.update(unset_fields) + assert args[0] == request_msg - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + +def test_sample_row_keys_routing_parameters_request_3_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + ) - response_value = Response() - response_value.status_code = 200 + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) - json_return_value = "[{}]".format(json_return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.generate_initial_change_stream_partitions(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +def test_sample_row_keys_routing_parameters_request_4_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) -def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Establish that the underlying stub method was called. 
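+    # A materialized_view_name routes on the parent instance, so the expected
+    # header below is the instance `name` rather than a table_name.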
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) - unset_fields = ( - transport.generate_initial_change_stream_partitions._get_unset_required_fields( - {} + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) - ) - assert set(unset_fields) == (set(()) & set(("tableName",))) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( +def test_mutate_row_routing_parameters_request_1_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + transport="rest", ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, - "post_generate_initial_change_stream_partitions", - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, - "pre_generate_initial_change_stream_partitions", - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( - bigtable.GenerateInitialChangeStreamPartitionsRequest() + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json( - bigtable.GenerateInitialChangeStreamPartitionsResponse() - ) + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) - req.return_value._content = "[{}]".format(req.return_value._content) - request = bigtable.GenerateInitialChangeStreamPartitionsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + assert args[0] == request_msg - client.generate_initial_change_stream_partitions( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) - - pre.assert_called_once() - post.assert_called_once() + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -def test_generate_initial_change_stream_partitions_rest_bad_request( - transport: str = "rest", - request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, -): +def test_mutate_row_routing_parameters_request_2_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row(request={"app_profile_id": "sample1"}) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.generate_initial_change_stream_partitions(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) + assert args[0] == request_msg -def test_generate_initial_change_stream_partitions_rest_flattened(): + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_mutate_row_routing_parameters_request_3_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", - app_profile_id="app_profile_id_value", + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( - return_value + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } ) - json_return_value = json_format.MessageToJson(pb_return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - client.generate_initial_change_stream_partitions(**mock_args) + assert args[0] == request_msg - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions" - % client.transport._host, - args[1], + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -def test_generate_initial_change_stream_partitions_rest_flattened_error( - transport: str = "rest", -): +def test_mutate_rows_routing_parameters_request_1_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.generate_initial_change_stream_partitions( - bigtable.GenerateInitialChangeStreamPartitionsRequest(), - table_name="table_name_value", - app_profile_id="app_profile_id_value", + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -def test_generate_initial_change_stream_partitions_rest_error(): +def test_mutate_rows_routing_parameters_request_2_rest(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows(request={"app_profile_id": "sample1"}) -@pytest.mark.parametrize( - "request_type", - [ - bigtable.ReadChangeStreamRequest, - dict, - ], -) -def test_read_change_stream_rest(request_type): + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_mutate_rows_routing_parameters_request_3_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + assert args[0] == request_msg - json_return_value = "[{}]".format(json_return_value) + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_change_stream(request) - assert isinstance(response, Iterable) - response = next(response) +def test_check_and_mutate_row_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ReadChangeStreamResponse) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) -def test_read_change_stream_rest_required_fields( - request_type=bigtable.ReadChangeStreamRequest, -): - transport_class = transports.BigtableRestTransport + assert args[0] == request_msg - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False, + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) - ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) - # verify fields with default values are dropped - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).read_change_stream._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) +def test_check_and_mutate_row_routing_parameters_request_2_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # verify required fields with default values are now present + # Mock the actual call, and fake the request. 
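+    # With only app_profile_id set, it should be the sole routing header.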
+ with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row(request={"app_profile_id": "sample1"}) - jsonified_request["tableName"] = "table_name_value" + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).read_change_stream._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + assert args[0] == request_msg - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + +def test_check_and_mutate_row_routing_parameters_request_3_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + ) - response_value = Response() - response_value.status_code = 200 + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - json_return_value = "[{}]".format(json_return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_change_stream(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +def test_ping_and_warm_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + client.ping_and_warm(request={"name": "projects/sample1/instances/sample2"}) -def test_read_change_stream_rest_unset_required_fields(): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Establish that the underlying stub method was called. 
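+    # ping_and_warm routes on the instance `name` field directly.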
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest( + **{"name": "projects/sample1/instances/sample2"} + ) - unset_fields = transport.read_change_stream._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName",))) + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_change_stream_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( +def test_ping_and_warm_routing_parameters_request_2_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + transport="rest", ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_change_stream" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_change_stream" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadChangeStreamRequest.pb( - bigtable.ReadChangeStreamRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadChangeStreamResponse.to_json( - bigtable.ReadChangeStreamResponse() - ) - req.return_value._content = "[{}]".format(req.return_value._content) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + client.ping_and_warm(request={"app_profile_id": "sample1"}) - request = bigtable.ReadChangeStreamRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadChangeStreamResponse() + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) - client.read_change_stream( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) + assert args[0] == request_msg - pre.assert_called_once() - post.assert_called_once() + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -def test_read_change_stream_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadChangeStreamRequest -): +def test_read_modify_write_row_routing_parameters_request_1_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_change_stream(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + assert args[0] == request_msg -def test_read_change_stream_rest_flattened(): + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_modify_write_row_routing_parameters_request_2_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row(request={"app_profile_id": "sample1"}) - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", - app_profile_id="app_profile_id_value", + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"app_profile_id": "sample1"} ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - client.read_change_stream(**mock_args) + assert args[0] == request_msg - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" - % client.transport._host, - args[1], + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -def test_read_change_stream_rest_flattened_error(transport: str = "rest"): +def test_read_modify_write_row_routing_parameters_request_3_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_change_stream( - bigtable.ReadChangeStreamRequest(), - table_name="table_name_value", - app_profile_id="app_profile_id_value", + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" + } + ) -def test_read_change_stream_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - + assert args[0] == request_msg -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.BigtableGrpcTransport( + +def test_prepare_query_routing_parameters_request_1_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} ) - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableClient( - client_options=options, - transport=transport, + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} ) - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableGrpcTransport( + +def test_prepare_query_routing_parameters_request_2_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + client.prepare_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableGrpcTransport( +def test_execute_query_routing_parameters_request_1_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - client = BigtableClient(transport=transport) - assert client.transport is transport + # Mock the actual call, and fake the request. 
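+    # execute_query takes an instance_name, which surfaces as `name` in the
+    # routing header.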
+ with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) - transport = transports.BigtableGrpcAsyncIOTransport( + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_execute_query_routing_parameters_request_2_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + client.execute_query(request={"app_profile_id": "sample1"}) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableGrpcTransport, - transports.BigtableGrpcAsyncIOTransport, - transports.BigtableRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest(**{"app_profile_id": "sample1"}) + assert args[0] == request_msg -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - "rest", - ], -) -def test_transport_kind(transport_name): - transport = BigtableClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert transport.kind == transport_name + expected_headers = {"app_profile_id": "sample1"} + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_transport_grpc_default(): @@ -5726,6 +11604,8 @@ def test_bigtable_base_transport(): "read_modify_write_row", "generate_initial_change_stream_partitions", "read_change_stream", + "prepare_query", + "execute_query", ) for method in methods: with pytest.raises(NotImplementedError): @@ -6036,6 +11916,12 @@ def test_bigtable_client_transport_session_collision(transport_name): session1 = client1.transport.read_change_stream._session session2 = client2.transport.read_change_stream._session assert session1 != session2 + session1 = client1.transport.prepare_query._session + session2 = client2.transport.prepare_query._session + assert session1 != session2 + session1 = client1.transport.execute_query._session + session2 = client2.transport.execute_query._session + assert session1 != session2 def test_bigtable_grpc_transport_channel(): @@ -6066,6 +11952,7 @@ def test_bigtable_grpc_asyncio_transport_channel(): # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.filterwarnings("ignore::FutureWarning") @pytest.mark.parametrize( "transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], @@ -6156,9 +12043,40 @@ def test_bigtable_transport_channel_mtls_with_adc(transport_class): assert transport.grpc_channel == mock_grpc_channel -def test_instance_path(): +def test_authorized_view_path(): project = "squid" instance = "clam" + table = "whelk" + authorized_view = "octopus" + expected = "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( + project=project, + instance=instance, + table=table, + authorized_view=authorized_view, + ) + actual = BigtableClient.authorized_view_path( + project, instance, table, authorized_view + ) + assert expected == actual + + +def test_parse_authorized_view_path(): + expected = { + "project": "oyster", + "instance": "nudibranch", + "table": "cuttlefish", + "authorized_view": "mussel", + } + path = BigtableClient.authorized_view_path(**expected) + + # Check that the path construction is reversible. 
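+    # i.e. parsing the rendered path should recover the original components.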
+ actual = BigtableClient.parse_authorized_view_path(path) + assert expected == actual + + +def test_instance_path(): + project = "winkle" + instance = "nautilus" expected = "projects/{project}/instances/{instance}".format( project=project, instance=instance, @@ -6169,8 +12087,8 @@ def test_instance_path(): def test_parse_instance_path(): expected = { - "project": "whelk", - "instance": "octopus", + "project": "scallop", + "instance": "abalone", } path = BigtableClient.instance_path(**expected) @@ -6179,10 +12097,36 @@ def test_parse_instance_path(): assert expected == actual +def test_materialized_view_path(): + project = "squid" + instance = "clam" + materialized_view = "whelk" + expected = "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( + project=project, + instance=instance, + materialized_view=materialized_view, + ) + actual = BigtableClient.materialized_view_path(project, instance, materialized_view) + assert expected == actual + + +def test_parse_materialized_view_path(): + expected = { + "project": "octopus", + "instance": "oyster", + "materialized_view": "nudibranch", + } + path = BigtableClient.materialized_view_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_materialized_view_path(path) + assert expected == actual + + def test_table_path(): - project = "oyster" - instance = "nudibranch" - table = "cuttlefish" + project = "cuttlefish" + instance = "mussel" + table = "winkle" expected = "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, @@ -6194,9 +12138,9 @@ def test_table_path(): def test_parse_table_path(): expected = { - "project": "mussel", - "instance": "winkle", - "table": "nautilus", + "project": "nautilus", + "instance": "scallop", + "table": "abalone", } path = BigtableClient.table_path(**expected) @@ -6206,7 +12150,7 @@ def test_parse_table_path(): def test_common_billing_account_path(): - billing_account = "scallop" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -6216,7 +12160,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "abalone", + "billing_account": "clam", } path = BigtableClient.common_billing_account_path(**expected) @@ -6226,7 +12170,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "squid" + folder = "whelk" expected = "folders/{folder}".format( folder=folder, ) @@ -6236,7 +12180,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "clam", + "folder": "octopus", } path = BigtableClient.common_folder_path(**expected) @@ -6246,7 +12190,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "whelk" + organization = "oyster" expected = "organizations/{organization}".format( organization=organization, ) @@ -6256,7 +12200,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "octopus", + "organization": "nudibranch", } path = BigtableClient.common_organization_path(**expected) @@ -6266,7 +12210,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "oyster" + project = "cuttlefish" expected = "projects/{project}".format( project=project, ) @@ -6276,7 +12220,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - 
"project": "nudibranch", + "project": "mussel", } path = BigtableClient.common_project_path(**expected) @@ -6286,8 +12230,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "cuttlefish" - location = "mussel" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -6298,8 +12242,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "winkle", - "location": "nautilus", + "project": "scallop", + "location": "abalone", } path = BigtableClient.common_location_path(**expected) @@ -6331,36 +12275,41 @@ def test_client_with_default_client_info(): prep.assert_called_once_with(client_info) +def test_transport_close_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + with mock.patch.object( + type(getattr(client.transport, "_grpc_channel")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + @pytest.mark.asyncio -async def test_transport_close_async(): +async def test_transport_close_grpc_asyncio(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", + credentials=async_anonymous_credentials(), transport="grpc_asyncio" ) with mock.patch.object( - type(getattr(client.transport, "grpc_channel")), "close" + type(getattr(client.transport, "_grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport - ) - with mock.patch.object( - type(getattr(client.transport, close_name)), "close" - ) as close: - with client: - close.assert_not_called() - close.assert_called_once() +def test_transport_close_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + with mock.patch.object( + type(getattr(client.transport, "_session")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() def test_client_ctx(): @@ -6401,7 +12350,9 @@ def test_api_key_credentials(client_class, transport_class): patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, diff --git a/tests/unit/test_packaging.py b/tests/unit/test_packaging.py new file mode 100644 index 000000000..93fa4d1c3 --- /dev/null +++ b/tests/unit/test_packaging.py @@ -0,0 +1,37 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import subprocess +import sys + + +def test_namespace_package_compat(tmp_path): + # The ``google`` namespace package should not be masked + # by the presence of ``google-cloud-bigtable``. + google = tmp_path / "google" + google.mkdir() + google.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.othermod"] + subprocess.check_call(cmd, env=env) + + # The ``google.cloud`` namespace package should not be masked + # by the presence of ``google-cloud-bigtable``. + google_cloud = tmp_path / "google" / "cloud" + google_cloud.mkdir() + google_cloud.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.cloud.othermod"] + subprocess.check_call(cmd, env=env) diff --git a/tests/unit/test_sql_routing_parameters.py b/tests/unit/test_sql_routing_parameters.py new file mode 100644 index 000000000..fa9316369 --- /dev/null +++ b/tests/unit/test_sql_routing_parameters.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # type: ignore # noqa: F401 +except ImportError: # pragma: NO COVER + import mock +import pytest + +from grpc.experimental import aio + +try: + from google.auth.aio import credentials as ga_credentials_async + + HAS_GOOGLE_AUTH_AIO = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_AUTH_AIO = False + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient +from google.cloud.bigtable_v2.services.bigtable.client import BigtableClient +from google.cloud.bigtable_v2.types import bigtable + +# This test file duplicates the GAPIC request header tests so that the temporary fix +# for SQL app_profile_id header handling cannot be overridden by GAPIC. +# TODO: remove this file once the fix is upstreamed + + +def async_anonymous_credentials(): + if HAS_GOOGLE_AUTH_AIO: + return ga_credentials_async.AnonymousCredentials() + return ga_credentials.AnonymousCredentials() + + +def test_prepare_query_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value = bigtable.PrepareQueryResponse() + client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called.
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1109 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + await client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1109 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_execute_query_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1109 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + await client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1109 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) diff --git a/tests/unit/v2_client/__init__.py b/tests/unit/v2_client/__init__.py new file mode 100644 index 000000000..e8e1c3845 --- /dev/null +++ b/tests/unit/v2_client/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/_testing.py b/tests/unit/v2_client/_testing.py similarity index 100% rename from tests/unit/_testing.py rename to tests/unit/v2_client/_testing.py diff --git a/tests/unit/v2_client/read-rows-acceptance-test.json b/tests/unit/v2_client/read-rows-acceptance-test.json new file mode 100644 index 000000000..011ace2b9 --- /dev/null +++ b/tests/unit/v2_client/read-rows-acceptance-test.json @@ -0,0 +1,1665 @@ +{ + "readRowsTests": [ + { + "description": "invalid - no commit", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - no cell key before commit", + "chunks": [ + { + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - no cell key before value", + "chunks": [ + { + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - new col family must specify qualifier", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "bare commit implies ts=0", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + } + ] + }, + { + "description": "simple row with timestamp", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + 
"timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "missing timestamp, implied ts=0", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "value": "value-VAL" + } + ] + }, + { + "description": "empty cell value", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + } + ] + }, + { + "description": "two unsplit cells", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "two qualifiers", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "two families", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "with labels", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "labels": [ + "L_1" + ], + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "labels": [ + "L_2" + ], + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1", + "label": "L_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2", + "label": "L_2" + } + ] + }, + { + "description": "split cell, bare commit", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUw=", + "commitRow": false + }, + { + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + } + ] + }, + { + "description": "split 
cell", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUw=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "split four ways", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "bA==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "dWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL", + "label": "L" + } + ] + }, + { + "description": "two split cells", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "multi-qualifier splits", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "multi-qualifier multi-split", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "bHVlLVZBTF8x", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "bHVlLVZBTF8y", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "multi-family split", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "familyName": "B", + 
"qualifier": "RQ==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "invalid - no commit between rows", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - no commit after first row", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - last row missing commit", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "error": true + } + ] + }, + { + "description": "invalid - duplicate row key", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMQ==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "error": true + } + ] + }, + { + "description": "invalid - new row missing row key", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "error": true + } + ] + }, + { + "description": "two rows", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "two rows implicit timestamp", + 
"chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "value": "value-VAL" + }, + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "two rows empty value", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C" + }, + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "two rows, one with multiple cells", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "97", + "value": "value-VAL_3" + } + ] + }, + { + "description": "two rows, multiple cells", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + { + "qualifier": "Rg==", + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "97", + "value": "value-VAL_3" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "F", + "timestampMicros": "96", + "value": "value-VAL_4" + } + ] + }, + { + "description": "two rows, multiple cells, multiple families", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "M", + "qualifier": "Tw==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + 
{ + "familyName": "N", + "qualifier": "UA==", + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK_1", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" + }, + { + "rowKey": "RK_2", + "familyName": "M", + "qualifier": "O", + "timestampMicros": "97", + "value": "value-VAL_3" + }, + { + "rowKey": "RK_2", + "familyName": "N", + "qualifier": "P", + "timestampMicros": "96", + "value": "value-VAL_4" + } + ] + }, + { + "description": "two rows, four cells, 2 labels", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "labels": [ + "L_1" + ], + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "97", + "labels": [ + "L_3" + ], + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + { + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1", + "label": "L_1" + }, + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "97", + "value": "value-VAL_3", + "label": "L_3" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "96", + "value": "value-VAL_4" + } + ] + }, + { + "description": "two rows with splits, same timestamp", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_1" + }, + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + } + ] + }, + { + "description": "invalid - bare reset", + "chunks": [ + { + "resetRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - bad reset, no commit", + "chunks": [ + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - missing key after reset", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + }, + { + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "no data after reset", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + 
"timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + } + ] + }, + { + "description": "simple reset", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "reset to new val", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + } + ] + }, + { + "description": "reset to new qual", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "RA==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "100", + "value": "value-VAL_1" + } + ] + }, + { + "description": "reset with splits", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + } + ] + }, + { + "description": "reset two cells", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "97", + "value": "value-VAL_3" + } + ] + }, + { + "description": "two resets", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": 
"Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_3" + } + ] + }, + { + "description": "reset then two cells", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "B", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "B", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + }, + { + "rowKey": "RK", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "97", + "value": "value-VAL_3" + } + ] + }, + { + "description": "reset to new row", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + } + ] + }, + { + "description": "reset in between chunks", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_1" + } + ] + }, + { + "description": "invalid - reset with chunk", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "resetRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - commit with chunk", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "empty cell chunk", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "commitRow": false + }, + { + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + } + ] + } + ] +} diff --git a/tests/unit/test_app_profile.py 
b/tests/unit/v2_client/test_app_profile.py similarity index 100% rename from tests/unit/test_app_profile.py rename to tests/unit/v2_client/test_app_profile.py diff --git a/tests/unit/test_backup.py b/tests/unit/v2_client/test_backup.py similarity index 97% rename from tests/unit/test_backup.py rename to tests/unit/v2_client/test_backup.py index 9882ca339..a5d205af6 100644 --- a/tests/unit/test_backup.py +++ b/tests/unit/v2_client/test_backup.py @@ -19,7 +19,6 @@ import pytest from ._testing import _make_credentials -from google.cloud._helpers import UTC PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" @@ -38,13 +37,13 @@ def _make_timestamp(): - return datetime.datetime.utcnow().replace(tzinfo=UTC) + return datetime.datetime.now(datetime.timezone.utc) def _make_table_admin_client(): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient - return mock.create_autospec(BigtableTableAdminClient, instance=True) + return mock.create_autospec(BaseBigtableTableAdminClient, instance=True) def _make_backup(*args, **kwargs): @@ -735,7 +734,7 @@ def test_backup_restore_w_grpc_error(): client = _Client() api = client.table_admin_client = _make_table_admin_client() - api.restore_table.side_effect = Unknown("testing") + api._restore_table.side_effect = Unknown("testing") timestamp = _make_timestamp() backup = _make_backup( @@ -749,7 +748,7 @@ def test_backup_restore_w_grpc_error(): with pytest.raises(GoogleAPICallError): backup.restore(TABLE_ID) - api.restore_table.assert_called_once_with( + api._restore_table.assert_called_once_with( request={"parent": INSTANCE_NAME, "table_id": TABLE_ID, "backup": BACKUP_NAME} ) @@ -772,7 +771,7 @@ def _restore_helper(instance_id=None, instance_name=None): op_future = object() client = _Client() api = client.table_admin_client = _make_table_admin_client() - api.restore_table.return_value = op_future + api._restore_table.return_value = op_future timestamp = _make_timestamp() backup = _make_backup( @@ -787,14 +786,14 @@ def _restore_helper(instance_id=None, instance_name=None): assert backup._cluster == CLUSTER_ID assert future is op_future - api.restore_table.assert_called_once_with( + api._restore_table.assert_called_once_with( request={ "parent": instance_name or INSTANCE_NAME, "table_id": TABLE_ID, "backup": BACKUP_NAME, } ) - api.restore_table.reset_mock() + api._restore_table.reset_mock() def test_backup_restore_default(): @@ -808,7 +807,7 @@ def test_backup_restore_to_another_instance(): def test_backup_get_iam_policy(): from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -825,7 +824,7 @@ def test_backup_get_iam_policy(): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec(BigtableTableAdminClient) + table_api = mock.create_autospec(BaseBigtableTableAdminClient) client._table_admin_client = table_api table_api.get_iam_policy.return_value = iam_policy @@ -844,7 +843,7 @@ def test_backup_get_iam_policy(): def test_backup_set_iam_policy(): from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) 
from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy @@ -862,7 +861,7 @@ def test_backup_set_iam_policy(): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec(BigtableTableAdminClient) + table_api = mock.create_autospec(BaseBigtableTableAdminClient) client._table_admin_client = table_api table_api.set_iam_policy.return_value = iam_policy_pb @@ -889,7 +888,7 @@ def test_backup_set_iam_policy(): def test_backup_test_iam_permissions(): from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from google.iam.v1 import iam_policy_pb2 @@ -903,7 +902,7 @@ def test_backup_test_iam_permissions(): response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - table_api = mock.create_autospec(BigtableTableAdminClient) + table_api = mock.create_autospec(BaseBigtableTableAdminClient) table_api.test_iam_permissions.return_value = response client._table_admin_client = table_api diff --git a/tests/unit/test_batcher.py b/tests/unit/v2_client/test_batcher.py similarity index 98% rename from tests/unit/test_batcher.py rename to tests/unit/v2_client/test_batcher.py index 998748141..fcf606972 100644 --- a/tests/unit/test_batcher.py +++ b/tests/unit/v2_client/test_batcher.py @@ -59,7 +59,6 @@ def callback_fn(response): def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - rows = [ DirectRow(row_key=b"row_key"), DirectRow(row_key=b"row_key_2"), @@ -75,7 +74,6 @@ def test_mutation_batcher_mutate_row(): def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", 1) row.set_cell("cf1", b"c2", 2) @@ -98,7 +96,6 @@ def test_mutation_batcher_flush_w_no_rows(): def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: - row_1 = DirectRow(row_key=b"row_key_1") row_2 = DirectRow(row_key=b"row_key_2") row_3 = DirectRow(row_key=b"row_key_3") @@ -114,7 +111,6 @@ def test_mutation_batcher_mutate_w_max_flush_count(): def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", 1) row.set_cell("cf1", b"c2", 2) @@ -130,7 +126,6 @@ def test_mutation_batcher_mutate_w_max_row_bytes(): with MutationsBatcher( table=table, max_row_bytes=3 * 1024 * 1024 ) as mutation_batcher: - number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -168,7 +163,6 @@ def test_mutations_batcher_context_manager_flushed_when_closed(): with MutationsBatcher( table=table, max_row_bytes=3 * 1024 * 1024 ) as mutation_batcher: - number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -204,7 +198,7 @@ def test_mutations_batcher_response_with_error_codes(): mocked_response = [Status(code=1), Status(code=5)] - with mock.patch("tests.unit.test_batcher._Table") as mocked_table: + with mock.patch("tests.unit.v2_client.test_batcher._Table") as mocked_table: table = mocked_table.return_value mutation_batcher = MutationsBatcher(table=table) diff --git a/tests/unit/test_client.py 
b/tests/unit/v2_client/test_client.py similarity index 90% rename from tests/unit/test_client.py rename to tests/unit/v2_client/test_client.py index 5944c58a3..a4fc0f9cb 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/v2_client/test_client.py @@ -173,10 +173,13 @@ def test_client_constructor_w_emulator_host(): from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + import grpc emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: + channel = grpc.insecure_channel("no-host") + with mock.patch("grpc.insecure_channel", return_value=channel) as factory: + factory.return_value = channel client = _make_client() # don't test local_composite_credentials # client._local_composite_credentials = lambda: credentials @@ -188,7 +191,6 @@ def test_client_constructor_w_emulator_host(): assert client.project == _DEFAULT_BIGTABLE_EMULATOR_CLIENT factory.assert_called_once_with( emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below options=_GRPC_CHANNEL_OPTIONS, ) @@ -196,10 +198,12 @@ def test_client_constructor_w_emulator_host(): def test_client_constructor_w_emulator_host_w_project(): from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + import grpc emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: + channel = grpc.insecure_channel("no-host") + with mock.patch("grpc.insecure_channel", return_value=channel) as factory: client = _make_client(project=PROJECT) # channels are formed when needed, so access a client # create a gapic channel @@ -209,7 +213,6 @@ def test_client_constructor_w_emulator_host_w_project(): assert client.project == PROJECT factory.assert_called_once_with( emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below options=_GRPC_CHANNEL_OPTIONS, ) @@ -218,11 +221,13 @@ def test_client_constructor_w_emulator_host_w_credentials(): from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + import grpc emulator_host = "localhost:8081" credentials = _make_credentials() with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: + channel = grpc.insecure_channel("no-host") + with mock.patch("grpc.insecure_channel", return_value=channel) as factory: client = _make_client(credentials=credentials) # channels are formed when needed, so access a client # create a gapic channel @@ -232,7 +237,6 @@ def test_client_constructor_w_emulator_host_w_credentials(): assert client.project == _DEFAULT_BIGTABLE_EMULATOR_CLIENT factory.assert_called_once_with( emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below options=_GRPC_CHANNEL_OPTIONS, ) @@ -271,15 +275,13 @@ def test_client__emulator_channel_w_sync(): project=PROJECT, credentials=_make_credentials(), read_only=True ) client._emulator_host = emulator_host - lcc = client._local_composite_credentials = mock.Mock(spec=[]) - with mock.patch("grpc.secure_channel") as patched: + with mock.patch("grpc.insecure_channel") as patched: channel = client._emulator_channel(transport, 
options) assert channel is patched.return_value patched.assert_called_once_with( emulator_host, - lcc.return_value, options=options, ) @@ -293,56 +295,17 @@ def test_client__emulator_channel_w_async(): project=PROJECT, credentials=_make_credentials(), read_only=True ) client._emulator_host = emulator_host - lcc = client._local_composite_credentials = mock.Mock(spec=[]) - with mock.patch("grpc.aio.secure_channel") as patched: + with mock.patch("grpc.aio.insecure_channel") as patched: channel = client._emulator_channel(transport, options) assert channel is patched.return_value patched.assert_called_once_with( emulator_host, - lcc.return_value, options=options, ) -def test_client__local_composite_credentials(): - client = _make_client( - project=PROJECT, credentials=_make_credentials(), read_only=True - ) - - wsir_patch = mock.patch("google.auth.credentials.with_scopes_if_required") - request_patch = mock.patch("google.auth.transport.requests.Request") - amp_patch = mock.patch("google.auth.transport.grpc.AuthMetadataPlugin") - grpc_patches = mock.patch.multiple( - "grpc", - metadata_call_credentials=mock.DEFAULT, - local_channel_credentials=mock.DEFAULT, - composite_channel_credentials=mock.DEFAULT, - ) - with wsir_patch as wsir_patched: - with request_patch as request_patched: - with amp_patch as amp_patched: - with grpc_patches as grpc_patched: - credentials = client._local_composite_credentials() - - grpc_mcc = grpc_patched["metadata_call_credentials"] - grpc_lcc = grpc_patched["local_channel_credentials"] - grpc_ccc = grpc_patched["composite_channel_credentials"] - - assert credentials is grpc_ccc.return_value - - wsir_patched.assert_called_once_with(client._credentials, None) - request_patched.assert_called_once_with() - amp_patched.assert_called_once_with( - wsir_patched.return_value, - request_patched.return_value, - ) - grpc_mcc.assert_called_once_with(amp_patched.return_value) - grpc_lcc.assert_called_once_with() - grpc_ccc.assert_called_once_with(grpc_lcc.return_value, grpc_mcc.return_value) - - def _create_gapic_client_channel_helper(endpoint=None, emulator_host=None): from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS @@ -486,18 +449,18 @@ def test_client_table_admin_client_not_initialized_no_admin_flag(): def test_client_table_admin_client_not_initialized_w_admin_flag(): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) table_admin_client = client.table_admin_client - assert isinstance(table_admin_client, BigtableTableAdminClient) + assert isinstance(table_admin_client, BaseBigtableTableAdminClient) assert client._table_admin_client is table_admin_client def test_client_table_admin_client_not_initialized_w_client_info(): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient credentials = _make_credentials() client_info = mock.Mock() @@ -509,7 +472,7 @@ def test_client_table_admin_client_not_initialized_w_client_info(): ) table_admin_client = client.table_admin_client - assert isinstance(table_admin_client, BigtableTableAdminClient) + assert isinstance(table_admin_client, BaseBigtableTableAdminClient) assert client._client_info is client_info assert client._table_admin_client is table_admin_client @@ -525,7 +488,7 @@ def 
test_client_table_admin_client_not_initialized_w_client_options(): ) client._create_gapic_client_channel = mock.Mock() - patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient") + patch = mock.patch("google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient") with patch as mocked: table_admin_client = client.table_admin_client diff --git a/tests/unit/test_cluster.py b/tests/unit/v2_client/test_cluster.py similarity index 98% rename from tests/unit/test_cluster.py rename to tests/unit/v2_client/test_cluster.py index cb0312b0c..a21104549 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/v2_client/test_cluster.py @@ -420,7 +420,7 @@ def test_cluster_create(): from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 from google.cloud.bigtable.enums import StorageType - NOW = datetime.datetime.utcnow() + NOW = datetime.datetime.now(datetime.timezone.utc) NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -475,7 +475,7 @@ def test_cluster_create_w_cmek(): from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 from google.cloud.bigtable.enums import StorageType - NOW = datetime.datetime.utcnow() + NOW = datetime.datetime.now(datetime.timezone.utc) NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -535,7 +535,7 @@ def test_cluster_create_w_autoscaling(): from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 from google.cloud.bigtable.enums import StorageType - NOW = datetime.datetime.utcnow() + NOW = datetime.datetime.now(datetime.timezone.utc) NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -602,7 +602,7 @@ def test_cluster_update(): ) from google.cloud.bigtable.enums import StorageType - NOW = datetime.datetime.utcnow() + NOW = datetime.datetime.now(datetime.timezone.utc) NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() @@ -669,7 +669,7 @@ def test_cluster_update_w_autoscaling(): ) from google.cloud.bigtable.enums import StorageType - NOW = datetime.datetime.utcnow() + NOW = datetime.datetime.now(datetime.timezone.utc) NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() @@ -728,7 +728,7 @@ def test_cluster_update_w_partial_autoscaling_config(): ) from google.cloud.bigtable.enums import StorageType - NOW = datetime.datetime.utcnow() + NOW = datetime.datetime.now(datetime.timezone.utc) NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() @@ -752,7 +752,6 @@ def test_cluster_update_w_partial_autoscaling_config(): }, ] for config in cluster_config: - cluster = _make_cluster( CLUSTER_ID, instance, @@ -813,7 +812,7 @@ def test_cluster_update_w_both_manual_and_autoscaling(): ) from google.cloud.bigtable.enums import StorageType - NOW = datetime.datetime.utcnow() + NOW = datetime.datetime.now(datetime.timezone.utc) NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() @@ -874,7 +873,7 @@ def test_cluster_disable_autoscaling(): from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.enums import StorageType - NOW = datetime.datetime.utcnow() + NOW = datetime.datetime.now(datetime.timezone.utc) NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() client = 
@@ -874,7 +873,7 @@ def test_cluster_disable_autoscaling():
     from google.cloud.bigtable.instance import Instance
     from google.cloud.bigtable.enums import StorageType

-    NOW = datetime.datetime.utcnow()
+    NOW = datetime.datetime.now(datetime.timezone.utc)
     NOW_PB = _datetime_to_pb_timestamp(NOW)
     credentials = _make_credentials()
     client = _make_client(project=PROJECT, credentials=credentials, admin=True)
@@ -927,7 +926,6 @@ def test_cluster_disable_autoscaling():


 def test_create_cluster_with_both_manual_and_autoscaling():
-
     from google.cloud.bigtable.instance import Instance
     from google.cloud.bigtable.enums import StorageType
@@ -955,7 +953,6 @@ def test_create_cluster_with_both_manual_and_autoscaling():


 def test_create_cluster_with_partial_autoscaling_config():
-
     from google.cloud.bigtable.instance import Instance
     from google.cloud.bigtable.enums import StorageType
@@ -996,7 +993,6 @@ def test_create_cluster_with_partial_autoscaling_config():


 def test_create_cluster_with_no_scaling_config():
-
     from google.cloud.bigtable.instance import Instance
     from google.cloud.bigtable.enums import StorageType
diff --git a/tests/unit/test_column_family.py b/tests/unit/v2_client/test_column_family.py
similarity index 97%
rename from tests/unit/test_column_family.py
rename to tests/unit/v2_client/test_column_family.py
index b464024a7..2480e11cb 100644
--- a/tests/unit/test_column_family.py
+++ b/tests/unit/v2_client/test_column_family.py
@@ -336,9 +336,9 @@ def _create_test_helper(gc_rule=None):
     from google.cloud.bigtable_admin_v2.types import (
         bigtable_table_admin as table_admin_v2_pb2,
     )
-    from tests.unit._testing import _FakeStub
+    from ._testing import _FakeStub
     from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
-        BigtableTableAdminClient,
+        BaseBigtableTableAdminClient,
     )

     project_id = "project-id"
@@ -357,7 +357,7 @@ def _create_test_helper(gc_rule=None):
         + table_id
     )

-    api = mock.create_autospec(BigtableTableAdminClient)
+    api = mock.create_autospec(BaseBigtableTableAdminClient)
     credentials = _make_credentials()
     client = _make_client(project=project_id, credentials=credentials, admin=True)
@@ -404,12 +404,12 @@ def test_column_family_create_with_gc_rule():


 def _update_test_helper(gc_rule=None):
-    from tests.unit._testing import _FakeStub
+    from ._testing import _FakeStub
     from google.cloud.bigtable_admin_v2.types import (
         bigtable_table_admin as table_admin_v2_pb2,
     )
     from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
-        BigtableTableAdminClient,
+        BaseBigtableTableAdminClient,
     )

     project_id = "project-id"
@@ -428,7 +428,7 @@ def _update_test_helper(gc_rule=None):
         + table_id
     )

-    api = mock.create_autospec(BigtableTableAdminClient)
+    api = mock.create_autospec(BaseBigtableTableAdminClient)
     credentials = _make_credentials()
     client = _make_client(project=project_id, credentials=credentials, admin=True)
     table = _Table(table_name, client=client)
@@ -478,9 +478,9 @@ def test_column_family_delete():
     from google.cloud.bigtable_admin_v2.types import (
         bigtable_table_admin as table_admin_v2_pb2,
     )
-    from tests.unit._testing import _FakeStub
+    from ._testing import _FakeStub
     from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
-        BigtableTableAdminClient,
+        BaseBigtableTableAdminClient,
     )

     project_id = "project-id"
@@ -499,7 +499,7 @@ def test_column_family_delete():
         + table_id
     )

-    api = mock.create_autospec(BigtableTableAdminClient)
+    api = mock.create_autospec(BaseBigtableTableAdminClient)
     credentials = _make_credentials()
     client = _make_client(project=project_id, credentials=credentials, admin=True)
     table = _Table(table_name, client=client)
@@ -595,7 +595,6 @@ def test__gc_rule_from_pb_unknown_field_name():
     from google.cloud.bigtable.column_family import _gc_rule_from_pb

     class MockProto(object):
-
         names = []

         _pb = {}
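The rename to `BaseBigtableTableAdminClient` flows through every `mock.create_autospec(...)` call above; autospec'd mocks validate attribute access and call signatures against the spec class, so pointing them at a stale class name fails loudly. A generic sketch of the pattern with a stand-in class (`ExampleAdminClient` is hypothetical, not a library type):

```python
from unittest import mock


class ExampleAdminClient:
    # Stand-in for an admin client; only its shape matters for autospec.
    def modify_column_families(self, request=None):
        raise NotImplementedError


api = mock.create_autospec(ExampleAdminClient)
api.modify_column_families(request={"name": "table"})
api.modify_column_families.assert_called_once()
# A misspelled attribute (e.g. api.modify_column_familes) raises
# AttributeError immediately, instead of silently returning a new mock.
```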
diff --git a/tests/unit/test_encryption_info.py b/tests/unit/v2_client/test_encryption_info.py
similarity index 100%
rename from tests/unit/test_encryption_info.py
rename to tests/unit/v2_client/test_encryption_info.py
diff --git a/tests/unit/test_error.py b/tests/unit/v2_client/test_error.py
similarity index 100%
rename from tests/unit/test_error.py
rename to tests/unit/v2_client/test_error.py
diff --git a/tests/unit/test_instance.py b/tests/unit/v2_client/test_instance.py
similarity index 97%
rename from tests/unit/test_instance.py
rename to tests/unit/v2_client/test_instance.py
index c577adca5..c5ef9c9b8 100644
--- a/tests/unit/test_instance.py
+++ b/tests/unit/v2_client/test_instance.py
@@ -19,6 +19,7 @@
 from ._testing import _make_credentials
 from google.cloud.bigtable.cluster import Cluster

+
 PROJECT = "project"
 INSTANCE_ID = "instance-id"
 INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID
@@ -67,7 +68,6 @@ def _make_instance(*args, **kwargs):


 def test_instance_constructor_defaults():
-
     client = object()
     instance = _make_instance(INSTANCE_ID, client)
     assert instance.instance_id == INSTANCE_ID
@@ -277,7 +277,7 @@ def _instance_api_response_for_create():
     )
     from google.cloud.bigtable_admin_v2.types import instance

-    NOW = datetime.datetime.utcnow()
+    NOW = datetime.datetime.now(datetime.timezone.utc)
     NOW_PB = _datetime_to_pb_timestamp(NOW)
     metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
     type_url = "type.googleapis.com/{}".format(
@@ -503,7 +503,7 @@ def _instance_api_response_for_update():
     )
     from google.cloud.bigtable_admin_v2.types import instance

-    NOW = datetime.datetime.utcnow()
+    NOW = datetime.datetime.now(datetime.timezone.utc)
     NOW_PB = _datetime_to_pb_timestamp(NOW)
     metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB)
     type_url = "type.googleapis.com/{}".format(
@@ -806,7 +806,7 @@ def _list_tables_helper(table_name=None):
         bigtable_table_admin as table_messages_v1_pb2,
     )
     from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
-        BigtableTableAdminClient,
+        BaseBigtableTableAdminClient,
     )

     credentials = _make_credentials()
@@ -816,7 +816,7 @@ def _list_tables_helper(table_name=None):
     instance_api = client._instance_admin_client = _make_instance_admin_api()
     instance_api.instance_path.return_value = "projects/project/instances/instance-id"
     table_api = client._table_admin_client = mock.create_autospec(
-        BigtableTableAdminClient
+        BaseBigtableTableAdminClient
     )
     if table_name is None:
         table_name = TABLE_NAME
@@ -944,3 +944,28 @@ def _next_page(self):

     assert isinstance(app_profile_2, AppProfile)
     assert app_profile_2.name == app_profile_name2
+
+
+@pytest.fixture()
+def data_api():
+    from google.cloud.bigtable_v2.services.bigtable import BigtableClient
+
+    data_api_mock = mock.create_autospec(BigtableClient)
+    data_api_mock.instance_path.return_value = (
+        f"projects/{PROJECT}/instances/{INSTANCE_ID}"
+    )
+    return data_api_mock
+
+
+@pytest.fixture()
+def client(data_api):
+    result = _make_client(
+        project="project-id", credentials=_make_credentials(), admin=True
+    )
+    result._table_data_client = data_api
+    return result
+
+
+@pytest.fixture()
+def instance(client):
+    return client.instance(instance_id=INSTANCE_ID)
diff --git a/tests/unit/test_policy.py b/tests/unit/v2_client/test_policy.py
similarity index 100%
rename from tests/unit/test_policy.py
rename to tests/unit/v2_client/test_policy.py
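The new fixtures at the bottom of `test_instance.py` compose: `instance` requests `client`, which requests the autospec'd `data_api`. A hedged sketch of how a test might consume the chain (the `_client` attribute access is an assumption about `Instance` internals, not something this diff shows):

```python
# Sketch: pytest resolves data_api -> client -> instance, so a test can
# depend on any link of the chain and get consistent mocks.
def test_instance_wires_in_mocked_data_api(instance, data_api):
    # Assumption: Instance keeps a reference to its owning client as _client.
    assert instance._client._table_data_client is data_api
```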
diff --git a/tests/unit/test_row.py b/tests/unit/v2_client/test_row.py
similarity index 99%
rename from tests/unit/test_row.py
rename to tests/unit/v2_client/test_row.py
index 49bbfc45c..f04802f5c 100644
--- a/tests/unit/test_row.py
+++ b/tests/unit/v2_client/test_row.py
@@ -480,7 +480,7 @@ def test_conditional_row_commit_too_many_mutations():


 def test_conditional_row_commit_no_mutations():
-    from tests.unit._testing import _FakeStub
+    from ._testing import _FakeStub

     project_id = "project-id"
     row_key = b"row_key"
@@ -607,7 +607,7 @@ def mock_parse_rmw_row_response(row_response):


 def test_append_row_commit_no_rules():
-    from tests.unit._testing import _FakeStub
+    from ._testing import _FakeStub

     project_id = "project-id"
     row_key = b"row_key"
diff --git a/tests/unit/test_row_data.py b/tests/unit/v2_client/test_row_data.py
similarity index 97%
rename from tests/unit/test_row_data.py
rename to tests/unit/v2_client/test_row_data.py
index fba69ceba..7c2987b56 100644
--- a/tests/unit/test_row_data.py
+++ b/tests/unit/v2_client/test_row_data.py
@@ -362,6 +362,30 @@ def test__retry_read_rows_exception_deadline_exceeded_wrapped_in_grpc():
     assert _retry_read_rows_exception(exception)


+def test_partial_cell_data():
+    from google.cloud.bigtable.row_data import PartialCellData
+
+    expected_key = b"row-key"
+    expected_family_name = b"family-name"
+    expected_qualifier = b"qualifier"
+    expected_timestamp = 1234
+    instance = PartialCellData(
+        expected_key, expected_family_name, expected_qualifier, expected_timestamp
+    )
+    assert instance.row_key == expected_key
+    assert instance.family_name == expected_family_name
+    assert instance.qualifier == expected_qualifier
+    assert instance.timestamp_micros == expected_timestamp
+    assert instance.value == b""
+    assert instance.labels == ()
+    # test updating value
+    added_value = b"added-value"
+    instance.append_value(added_value)
+    assert instance.value == added_value
+    instance.append_value(added_value)
+    assert instance.value == added_value + added_value
+
+
 def _make_partial_rows_data(*args, **kwargs):
     from google.cloud.bigtable.row_data import PartialRowsData

@@ -1118,7 +1142,6 @@ def test_RRRM_build_updated_request_row_ranges_valid():


 class _MockCancellableIterator(object):
-
     cancel_calls = 0

     def __init__(self, *values):
@@ -1199,5 +1222,4 @@ def _read_rows_retry_exception(exc):


 class _Client(object):
-
     data_stub = None
diff --git a/tests/unit/test_row_filters.py b/tests/unit/v2_client/test_row_filters.py
similarity index 100%
rename from tests/unit/test_row_filters.py
rename to tests/unit/v2_client/test_row_filters.py
diff --git a/tests/unit/test_row_merger.py b/tests/unit/v2_client/test_row_merger.py
similarity index 100%
rename from tests/unit/test_row_merger.py
rename to tests/unit/v2_client/test_row_merger.py
diff --git a/tests/unit/test_row_set.py b/tests/unit/v2_client/test_row_set.py
similarity index 100%
rename from tests/unit/test_row_set.py
rename to tests/unit/v2_client/test_row_set.py
diff --git a/tests/unit/test_table.py b/tests/unit/v2_client/test_table.py
similarity index 99%
rename from tests/unit/test_table.py
rename to tests/unit/v2_client/test_table.py
index 3d7d2e8ee..6b31a5e23 100644
--- a/tests/unit/test_table.py
+++ b/tests/unit/v2_client/test_table.py
@@ -349,7 +349,7 @@ def _make_table_api():
         client as bigtable_table_admin,
     )

-    return mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient)
+    return mock.create_autospec(bigtable_table_admin.BaseBigtableTableAdminClient)


 def _create_table_helper(split_keys=[], column_families={}):
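The added `test_partial_cell_data` pins down the accumulation contract of `PartialCellData`: the value starts empty and `append_value()` concatenates chunks in order. A minimal stand-in sketch of the behavior the test asserts (not the library's actual implementation):

```python
class PartialCellDataSketch:
    # Mirrors the contract exercised above: empty initial value, empty
    # labels tuple, and append_value() concatenating byte chunks.
    def __init__(self, row_key, family_name, qualifier, timestamp_micros):
        self.row_key = row_key
        self.family_name = family_name
        self.qualifier = qualifier
        self.timestamp_micros = timestamp_micros
        self.value = b""
        self.labels = ()

    def append_value(self, value):
        # Chunked cells arrive in pieces; each later chunk is appended.
        self.value += value
```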
@@ -1067,8 +1067,8 @@ def test_table_yield_retry_rows():
         for row in table.yield_rows(start_key=ROW_KEY_1, end_key=ROW_KEY_2):
             rows.append(row)

-    assert len(warned) == 1
-    assert warned[0].category is DeprecationWarning
+    assert len(warned) >= 1
+    assert DeprecationWarning in [w.category for w in warned]

     result = rows[1]
     assert result.row_key == ROW_KEY_2
@@ -1140,8 +1140,8 @@ def test_table_yield_rows_with_row_set():
         for row in table.yield_rows(row_set=row_set):
             rows.append(row)

-    assert len(warned) == 1
-    assert warned[0].category is DeprecationWarning
+    assert len(warned) >= 1
+    assert DeprecationWarning in [w.category for w in warned]

     assert rows[0].row_key == ROW_KEY_1
     assert rows[1].row_key == ROW_KEY_2
@@ -1378,13 +1378,12 @@ def test_table_backup_factory_defaults():

 def test_table_backup_factory_non_defaults():
     import datetime
-    from google.cloud._helpers import UTC
     from google.cloud.bigtable.backup import Backup
     from google.cloud.bigtable.instance import Instance

     instance = Instance(INSTANCE_ID, None)
     table = _make_table(TABLE_ID, instance)
-    timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC)
+    timestamp = datetime.datetime.now(datetime.timezone.utc)
     backup = table.backup(
         BACKUP_ID,
         cluster_id=CLUSTER_ID,
@@ -1482,7 +1481,7 @@ def _table_restore_helper(backup_name=None):
     table = _make_table(TABLE_ID, instance)

     table_api = client._table_admin_client = _make_table_api()
-    table_api.restore_table.return_value = op_future
+    table_api._restore_table.return_value = op_future

     if backup_name:
         future = table.restore(TABLE_ID, backup_name=BACKUP_NAME)
@@ -1496,7 +1495,7 @@ def _table_restore_helper(backup_name=None):
         "table_id": TABLE_ID,
         "backup": BACKUP_NAME,
     }
-    table_api.restore_table.assert_called_once_with(request=expected_request)
+    table_api._restore_table.assert_called_once_with(request=expected_request)


 def test_table_restore_table_w_backup_id():
@@ -1689,7 +1688,6 @@ def _do_mutate_retryable_rows_helper(

     expected_entries = []
     for row, prior_status in zip(rows, worker.responses_statuses):
-
         if prior_status is None or prior_status.code in RETRYABLES:
             mutations = row._get_mutations().copy()  # row clears on success
             entry = data_messages_v2_pb2.MutateRowsRequest.Entry(